TOMOYO Linux Cross Reference
Linux/fs/ocfs2/dlm/dlmrecovery.c

  1 /* -*- mode: c; c-basic-offset: 8; -*-
  2  * vim: noexpandtab sw=8 ts=8 sts=0:
  3  *
  4  * dlmrecovery.c
  5  *
  6  * recovery logic: remastering the locks of dead nodes
  7  *
  8  * Copyright (C) 2004 Oracle.  All rights reserved.
  9  *
 10  * This program is free software; you can redistribute it and/or
 11  * modify it under the terms of the GNU General Public
 12  * License as published by the Free Software Foundation; either
 13  * version 2 of the License, or (at your option) any later version.
 14  *
 15  * This program is distributed in the hope that it will be useful,
 16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 18  * General Public License for more details.
 19  *
 20  * You should have received a copy of the GNU General Public
 21  * License along with this program; if not, write to the
 22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 23  * Boston, MA 02111-1307, USA.
 24  *
 25  */
 26 
 27 
 28 #include <linux/module.h>
 29 #include <linux/fs.h>
 30 #include <linux/types.h>
 31 #include <linux/slab.h>
 32 #include <linux/highmem.h>
 33 #include <linux/init.h>
 34 #include <linux/sysctl.h>
 35 #include <linux/random.h>
 36 #include <linux/blkdev.h>
 37 #include <linux/socket.h>
 38 #include <linux/inet.h>
 39 #include <linux/timer.h>
 40 #include <linux/kthread.h>
 41 #include <linux/delay.h>
 42 
 43 
 44 #include "cluster/heartbeat.h"
 45 #include "cluster/nodemanager.h"
 46 #include "cluster/tcp.h"
 47 
 48 #include "dlmapi.h"
 49 #include "dlmcommon.h"
 50 #include "dlmdomain.h"
 51 
 52 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
 53 #include "cluster/masklog.h"
 54 
 55 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);
 56 
 57 static int dlm_recovery_thread(void *data);
 58 static int dlm_do_recovery(struct dlm_ctxt *dlm);
 59 
 60 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
 61 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
 62 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
 63 static int dlm_request_all_locks(struct dlm_ctxt *dlm,
 64                                  u8 request_from, u8 dead_node);
 65 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
 66 
 67 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
 68 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
 69                                         const char *lockname, int namelen,
 70                                         int total_locks, u64 cookie,
 71                                         u8 flags, u8 master);
 72 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
 73                                     struct dlm_migratable_lockres *mres,
 74                                     u8 send_to,
 75                                     struct dlm_lock_resource *res,
 76                                     int total_locks);
 77 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
 78                                      struct dlm_lock_resource *res,
 79                                      struct dlm_migratable_lockres *mres);
 80 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
 81 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
 82                                  u8 dead_node, u8 send_to);
 83 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
 84 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
 85                                         struct list_head *list, u8 dead_node);
 86 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
 87                                               u8 dead_node, u8 new_master);
 88 static void dlm_reco_ast(void *astdata);
 89 static void dlm_reco_bast(void *astdata, int blocked_type);
 90 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
 91 static void dlm_request_all_locks_worker(struct dlm_work_item *item,
 92                                          void *data);
 93 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
 94 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
 95                                       struct dlm_lock_resource *res,
 96                                       u8 *real_master);
 97 
 98 static u64 dlm_get_next_mig_cookie(void);
 99 
100 static DEFINE_SPINLOCK(dlm_reco_state_lock);
101 static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
102 static u64 dlm_mig_cookie = 1;
103 
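     /* hand out the next migration cookie.  a cookie ties together the
      * multiple network messages of one large lockres migration; zero is
      * reserved to mean "fits in a single message", so the counter starts
      * at 1, wraps from ~0 back to 1, and never returns 0. */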
104 static u64 dlm_get_next_mig_cookie(void)
105 {
106         u64 c;
107         spin_lock(&dlm_mig_cookie_lock);
108         c = dlm_mig_cookie;
109         if (dlm_mig_cookie == (~0ULL))
110                 dlm_mig_cookie = 1;
111         else
112                 dlm_mig_cookie++;
113         spin_unlock(&dlm_mig_cookie_lock);
114         return c;
115 }
116 
117 static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
118                                           u8 dead_node)
119 {
120         assert_spin_locked(&dlm->spinlock);
121         if (dlm->reco.dead_node != dead_node)
122                 mlog(0, "%s: changing dead_node from %u to %u\n",
123                      dlm->name, dlm->reco.dead_node, dead_node);
124         dlm->reco.dead_node = dead_node;
125 }
126 
127 static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
128                                        u8 master)
129 {
130         assert_spin_locked(&dlm->spinlock);
131         mlog(0, "%s: changing new_master from %u to %u\n",
132              dlm->name, dlm->reco.new_master, master);
133         dlm->reco.new_master = master;
134 }
135 
136 static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
137 {
138         assert_spin_locked(&dlm->spinlock);
139         clear_bit(dlm->reco.dead_node, dlm->recovery_map);
140         dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
141         dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
142 }
143 
144 static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
145 {
146         spin_lock(&dlm->spinlock);
147         __dlm_reset_recovery(dlm);
148         spin_unlock(&dlm->spinlock);
149 }
150 
151 /* Worker function used during recovery. */
152 void dlm_dispatch_work(struct work_struct *work)
153 {
154         struct dlm_ctxt *dlm =
155                 container_of(work, struct dlm_ctxt, dispatched_work);
156         LIST_HEAD(tmp_list);
157         struct dlm_work_item *item, *next;
158         dlm_workfunc_t *workfunc;
159         int tot=0;
160 
161         spin_lock(&dlm->work_lock);
162         list_splice_init(&dlm->work_list, &tmp_list);
163         spin_unlock(&dlm->work_lock);
164 
165         list_for_each_entry(item, &tmp_list, list) {
166                 tot++;
167         }
168         mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);
169 
170         list_for_each_entry_safe(item, next, &tmp_list, list) {
171                 workfunc = item->func;
172                 list_del_init(&item->list);
173 
174                 /* already have ref on dlm to avoid having
175                  * it disappear.  just double-check. */
176                 BUG_ON(item->dlm != dlm);
177 
178                 /* this is allowed to sleep and
179                  * call network stuff */
180                 workfunc(item, item->data);
181 
182                 dlm_put(dlm);
183                 kfree(item);
184         }
185 }
186 
187 /*
188  * RECOVERY THREAD
189  */
190 
191 void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
192 {
193         /* wake the recovery thread
194          * this will wake the reco thread in one of three places
195          * 1) sleeping with no recovery happening
196          * 2) sleeping with recovery mastered elsewhere
197          * 3) recovery mastered here, waiting on reco data */
198 
199         wake_up(&dlm->dlm_reco_thread_wq);
200 }
201 
202 /* Launch the recovery thread */
203 int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
204 {
205         mlog(0, "starting dlm recovery thread...\n");
206 
207         dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
208                                                 "dlm_reco_thread");
209         if (IS_ERR(dlm->dlm_reco_thread_task)) {
210                 mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
211                 dlm->dlm_reco_thread_task = NULL;
212                 return -EINVAL;
213         }
214 
215         return 0;
216 }
217 
218 void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
219 {
220         if (dlm->dlm_reco_thread_task) {
221                 mlog(0, "waiting for dlm recovery thread to exit\n");
222                 kthread_stop(dlm->dlm_reco_thread_task);
223                 dlm->dlm_reco_thread_task = NULL;
224         }
225 }
226 
227 
228 
229 /*
230  * this is lame, but here's how recovery works...
231  * 1) all recovery threads cluster wide will work on recovering
232  *    ONE node at a time
233  * 2) negotiate who will take over all the locks for the dead node.
 234  *    that's right... ALL the locks.
235  * 3) once a new master is chosen, everyone scans all locks
236  *    and moves aside those mastered by the dead guy
237  * 4) each of these locks should be locked until recovery is done
238  * 5) the new master collects up all of secondary lock queue info
239  *    one lock at a time, forcing each node to communicate back
240  *    before continuing
241  * 6) each secondary lock queue responds with the full known lock info
 242  * 7) once the new master has run all its locks, it sends an ALLDONE!
243  *    message to everyone
244  * 8) upon receiving this message, the secondary queue node unlocks
245  *    and responds to the ALLDONE
 246  * 9) once the new master gets responses from everyone, it unlocks
 247  *    everything and recovery for this dead node is done
 248  * 10) go back to 2) while there are still dead nodes
249  *
250  */
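     /*
      * In this file, dlm_do_recovery() drives the loop above: the
      * negotiation in step 2 is dlm_pick_recovery_master(), and on the
      * node that wins, the lock collection of steps 5-9 is driven by
      * dlm_remaster_locks().
      */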
251 
252 static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
253 {
254         struct dlm_reco_node_data *ndata;
255         struct dlm_lock_resource *res;
256 
257         mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
258              dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
259              dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
260              dlm->reco.dead_node, dlm->reco.new_master);
261 
262         list_for_each_entry(ndata, &dlm->reco.node_data, list) {
263                 char *st = "unknown";
264                 switch (ndata->state) {
265                         case DLM_RECO_NODE_DATA_INIT:
266                                 st = "init";
267                                 break;
268                         case DLM_RECO_NODE_DATA_REQUESTING:
269                                 st = "requesting";
270                                 break;
271                         case DLM_RECO_NODE_DATA_DEAD:
272                                 st = "dead";
273                                 break;
274                         case DLM_RECO_NODE_DATA_RECEIVING:
275                                 st = "receiving";
276                                 break;
277                         case DLM_RECO_NODE_DATA_REQUESTED:
278                                 st = "requested";
279                                 break;
280                         case DLM_RECO_NODE_DATA_DONE:
281                                 st = "done";
282                                 break;
283                         case DLM_RECO_NODE_DATA_FINALIZE_SENT:
284                                 st = "finalize-sent";
285                                 break;
286                         default:
287                                 st = "bad";
288                                 break;
289                 }
290                 mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
291                      dlm->name, ndata->node_num, st);
292         }
293         list_for_each_entry(res, &dlm->reco.resources, recovering) {
294                 mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
295                      dlm->name, res->lockname.len, res->lockname.name);
296         }
297 }
298 
299 #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
300 
301 static int dlm_recovery_thread(void *data)
302 {
303         int status;
304         struct dlm_ctxt *dlm = data;
305         unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);
306 
307         mlog(0, "dlm thread running for %s...\n", dlm->name);
308 
309         while (!kthread_should_stop()) {
310                 if (dlm_domain_fully_joined(dlm)) {
311                         status = dlm_do_recovery(dlm);
312                         if (status == -EAGAIN) {
313                                 /* do not sleep, recheck immediately. */
314                                 continue;
315                         }
316                         if (status < 0)
317                                 mlog_errno(status);
318                 }
319 
320                 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
321                                                  kthread_should_stop(),
322                                                  timeout);
323         }
324 
325         mlog(0, "quitting DLM recovery thread\n");
326         return 0;
327 }
328 
329 /* returns true when the recovery master has contacted us */
330 static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
331 {
332         int ready;
333         spin_lock(&dlm->spinlock);
334         ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
335         spin_unlock(&dlm->spinlock);
336         return ready;
337 }
338 
339 /* returns true if node is no longer in the domain
340  * could be dead or just not joined */
341 int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
342 {
343         int dead;
344         spin_lock(&dlm->spinlock);
345         dead = !test_bit(node, dlm->domain_map);
346         spin_unlock(&dlm->spinlock);
347         return dead;
348 }
349 
 350 /* returns true if node has been recovered,
 351  * i.e. it is no longer set in the recovery map */
352 static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
353 {
354         int recovered;
355         spin_lock(&dlm->spinlock);
356         recovered = !test_bit(node, dlm->recovery_map);
357         spin_unlock(&dlm->spinlock);
358         return recovered;
359 }
360 
361 
362 void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
363 {
364         if (dlm_is_node_dead(dlm, node))
365                 return;
366 
367         printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
368                "domain %s\n", node, dlm->name);
369 
370         if (timeout)
371                 wait_event_timeout(dlm->dlm_reco_thread_wq,
372                                    dlm_is_node_dead(dlm, node),
373                                    msecs_to_jiffies(timeout));
374         else
375                 wait_event(dlm->dlm_reco_thread_wq,
376                            dlm_is_node_dead(dlm, node));
377 }
378 
379 void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
380 {
381         if (dlm_is_node_recovered(dlm, node))
382                 return;
383 
384         printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in "
385                "domain %s\n", node, dlm->name);
386 
387         if (timeout)
388                 wait_event_timeout(dlm->dlm_reco_thread_wq,
389                                    dlm_is_node_recovered(dlm, node),
390                                    msecs_to_jiffies(timeout));
391         else
392                 wait_event(dlm->dlm_reco_thread_wq,
393                            dlm_is_node_recovered(dlm, node));
394 }
395 
396 /* callers of the top-level api calls (dlmlock/dlmunlock) should
397  * block on the dlm->reco.event when recovery is in progress.
398  * the dlm recovery thread will set this state when it begins
399  * recovering a dead node (as the new master or not) and clear
400  * the state and wake as soon as all affected lock resources have
401  * been marked with the RECOVERY flag */
402 static int dlm_in_recovery(struct dlm_ctxt *dlm)
403 {
404         int in_recovery;
405         spin_lock(&dlm->spinlock);
406         in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
407         spin_unlock(&dlm->spinlock);
408         return in_recovery;
409 }
410 
411 
412 void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
413 {
414         if (dlm_in_recovery(dlm)) {
415                 mlog(0, "%s: reco thread %d in recovery: "
416                      "state=%d, master=%u, dead=%u\n",
417                      dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
418                      dlm->reco.state, dlm->reco.new_master,
419                      dlm->reco.dead_node);
420         }
421         wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
422 }
423 
424 static void dlm_begin_recovery(struct dlm_ctxt *dlm)
425 {
426         spin_lock(&dlm->spinlock);
427         BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
428         printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
429                dlm->name, dlm->reco.dead_node);
430         dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
431         spin_unlock(&dlm->spinlock);
432 }
433 
434 static void dlm_end_recovery(struct dlm_ctxt *dlm)
435 {
436         spin_lock(&dlm->spinlock);
437         BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
438         dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
439         spin_unlock(&dlm->spinlock);
440         printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
441         wake_up(&dlm->reco.event);
442 }
443 
444 static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
445 {
446         printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
447                "dead node %u in domain %s\n", dlm->reco.new_master,
448                (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
449                dlm->reco.dead_node, dlm->name);
450 }
451 
452 static int dlm_do_recovery(struct dlm_ctxt *dlm)
453 {
454         int status = 0;
455         int ret;
456 
457         spin_lock(&dlm->spinlock);
458 
459         /* check to see if the new master has died */
460         if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
461             test_bit(dlm->reco.new_master, dlm->recovery_map)) {
462                 mlog(0, "new master %u died while recovering %u!\n",
463                      dlm->reco.new_master, dlm->reco.dead_node);
464                 /* unset the new_master, leave dead_node */
465                 dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
466         }
467 
468         /* select a target to recover */
469         if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
470                 int bit;
471 
 472                 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
473                 if (bit >= O2NM_MAX_NODES || bit < 0)
474                         dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
475                 else
476                         dlm_set_reco_dead_node(dlm, bit);
477         } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
478                 /* BUG? */
479                 mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
480                      dlm->reco.dead_node);
481                 dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
482         }
483 
484         if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
485                 // mlog(0, "nothing to recover!  sleeping now!\n");
486                 spin_unlock(&dlm->spinlock);
487                 /* return to main thread loop and sleep. */
488                 return 0;
489         }
490         mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
491              dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
492              dlm->reco.dead_node);
493         spin_unlock(&dlm->spinlock);
494 
495         /* take write barrier */
496         /* (stops the list reshuffling thread, proxy ast handling) */
497         dlm_begin_recovery(dlm);
498 
499         if (dlm->reco.new_master == dlm->node_num)
500                 goto master_here;
501 
502         if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
503                 /* choose a new master, returns 0 if this node
504                  * is the master, -EEXIST if it's another node.
505                  * this does not return until a new master is chosen
506                  * or recovery completes entirely. */
507                 ret = dlm_pick_recovery_master(dlm);
508                 if (!ret) {
509                         /* already notified everyone.  go. */
510                         goto master_here;
511                 }
512                 mlog(0, "another node will master this recovery session.\n");
513         }
514 
515         dlm_print_recovery_master(dlm);
516 
517         /* it is safe to start everything back up here
518          * because all of the dead node's lock resources
519          * have been marked as in-recovery */
520         dlm_end_recovery(dlm);
521 
522         /* sleep out in main dlm_recovery_thread loop. */
523         return 0;
524 
525 master_here:
526         dlm_print_recovery_master(dlm);
527 
528         status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
529         if (status < 0) {
530                 /* we should never hit this anymore */
531                 mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
532                      "retrying.\n", dlm->name, status, dlm->reco.dead_node);
533                 /* yield a bit to allow any final network messages
534                  * to get handled on remaining nodes */
535                 msleep(100);
536         } else {
537                 /* success!  see if any other nodes need recovery */
538                 mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
539                      dlm->name, dlm->reco.dead_node, dlm->node_num);
540                 spin_lock(&dlm->spinlock);
541                 __dlm_reset_recovery(dlm);
542                 dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
543                 spin_unlock(&dlm->spinlock);
544         }
545         dlm_end_recovery(dlm);
546 
547         /* continue and look for another dead node */
548         return -EAGAIN;
549 }
550 
551 static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
552 {
553         int status = 0;
554         struct dlm_reco_node_data *ndata;
555         int all_nodes_done;
556         int destroy = 0;
557         int pass = 0;
558 
559         do {
560                 /* we have become recovery master.  there is no escaping
561                  * this, so just keep trying until we get it. */
562                 status = dlm_init_recovery_area(dlm, dead_node);
563                 if (status < 0) {
564                         mlog(ML_ERROR, "%s: failed to alloc recovery area, "
565                              "retrying\n", dlm->name);
566                         msleep(1000);
567                 }
568         } while (status != 0);
569 
570         /* safe to access the node data list without a lock, since this
571          * process is the only one to change the list */
572         list_for_each_entry(ndata, &dlm->reco.node_data, list) {
573                 BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
574                 ndata->state = DLM_RECO_NODE_DATA_REQUESTING;
575 
576                 mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
577                      ndata->node_num);
578 
579                 if (ndata->node_num == dlm->node_num) {
580                         ndata->state = DLM_RECO_NODE_DATA_DONE;
581                         continue;
582                 }
583 
584                 do {
585                         status = dlm_request_all_locks(dlm, ndata->node_num,
586                                                        dead_node);
587                         if (status < 0) {
588                                 mlog_errno(status);
589                                 if (dlm_is_host_down(status)) {
590                                         /* node died, ignore it for recovery */
591                                         status = 0;
592                                         ndata->state = DLM_RECO_NODE_DATA_DEAD;
593                                         /* wait for the domain map to catch up
594                                          * with the network state. */
595                                         wait_event_timeout(dlm->dlm_reco_thread_wq,
596                                                            dlm_is_node_dead(dlm,
597                                                                 ndata->node_num),
598                                                            msecs_to_jiffies(1000));
599                                         mlog(0, "waited 1 sec for %u, "
600                                              "dead? %s\n", ndata->node_num,
601                                              dlm_is_node_dead(dlm, ndata->node_num) ?
602                                              "yes" : "no");
603                                 } else {
604                                         /* -ENOMEM on the other node */
605                                         mlog(0, "%s: node %u returned "
606                                              "%d during recovery, retrying "
607                                              "after a short wait\n",
608                                              dlm->name, ndata->node_num,
609                                              status);
610                                         msleep(100);
611                                 }
612                         }
613                 } while (status != 0);
614 
615                 spin_lock(&dlm_reco_state_lock);
616                 switch (ndata->state) {
617                         case DLM_RECO_NODE_DATA_INIT:
618                         case DLM_RECO_NODE_DATA_FINALIZE_SENT:
619                         case DLM_RECO_NODE_DATA_REQUESTED:
620                                 BUG();
621                                 break;
622                         case DLM_RECO_NODE_DATA_DEAD:
623                                 mlog(0, "node %u died after requesting "
624                                      "recovery info for node %u\n",
625                                      ndata->node_num, dead_node);
626                                 /* fine.  don't need this node's info.
627                                  * continue without it. */
628                                 break;
629                         case DLM_RECO_NODE_DATA_REQUESTING:
630                                 ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
631                                 mlog(0, "now receiving recovery data from "
632                                      "node %u for dead node %u\n",
633                                      ndata->node_num, dead_node);
634                                 break;
635                         case DLM_RECO_NODE_DATA_RECEIVING:
636                                 mlog(0, "already receiving recovery data from "
637                                      "node %u for dead node %u\n",
638                                      ndata->node_num, dead_node);
639                                 break;
640                         case DLM_RECO_NODE_DATA_DONE:
641                                 mlog(0, "already DONE receiving recovery data "
642                                      "from node %u for dead node %u\n",
643                                      ndata->node_num, dead_node);
644                                 break;
645                 }
646                 spin_unlock(&dlm_reco_state_lock);
647         }
648 
649         mlog(0, "%s: Done requesting all lock info\n", dlm->name);
650 
651         /* nodes should be sending reco data now
652          * just need to wait */
653 
654         while (1) {
655                 /* check all the nodes now to see if we are
656                  * done, or if anyone died */
657                 all_nodes_done = 1;
658                 spin_lock(&dlm_reco_state_lock);
659                 list_for_each_entry(ndata, &dlm->reco.node_data, list) {
660                         mlog(0, "checking recovery state of node %u\n",
661                              ndata->node_num);
662                         switch (ndata->state) {
663                                 case DLM_RECO_NODE_DATA_INIT:
664                                 case DLM_RECO_NODE_DATA_REQUESTING:
665                                         mlog(ML_ERROR, "bad ndata state for "
666                                              "node %u: state=%d\n",
667                                              ndata->node_num, ndata->state);
668                                         BUG();
669                                         break;
670                                 case DLM_RECO_NODE_DATA_DEAD:
671                                         mlog(0, "node %u died after "
672                                              "requesting recovery info for "
673                                              "node %u\n", ndata->node_num,
674                                              dead_node);
675                                         break;
676                                 case DLM_RECO_NODE_DATA_RECEIVING:
677                                 case DLM_RECO_NODE_DATA_REQUESTED:
678                                         mlog(0, "%s: node %u still in state %s\n",
679                                              dlm->name, ndata->node_num,
680                                              ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
681                                              "receiving" : "requested");
682                                         all_nodes_done = 0;
683                                         break;
684                                 case DLM_RECO_NODE_DATA_DONE:
685                                         mlog(0, "%s: node %u state is done\n",
686                                              dlm->name, ndata->node_num);
687                                         break;
688                                 case DLM_RECO_NODE_DATA_FINALIZE_SENT:
689                                         mlog(0, "%s: node %u state is finalize\n",
690                                              dlm->name, ndata->node_num);
691                                         break;
692                         }
693                 }
694                 spin_unlock(&dlm_reco_state_lock);
695 
696                 mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
697                      all_nodes_done?"yes":"no");
698                 if (all_nodes_done) {
699                         int ret;
700 
 701                         /* Set this flag on the recovery master so
 702                          * that recovery of another dead node cannot
 703                          * start before this recovery is finalized;
 704                          * otherwise recovery may hang. */
705                         spin_lock(&dlm->spinlock);
706                         dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
707                         spin_unlock(&dlm->spinlock);
708 
709                         /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
710                          * just send a finalize message to everyone and
711                          * clean up */
712                         mlog(0, "all nodes are done! send finalize\n");
713                         ret = dlm_send_finalize_reco_message(dlm);
714                         if (ret < 0)
715                                 mlog_errno(ret);
716 
717                         spin_lock(&dlm->spinlock);
718                         dlm_finish_local_lockres_recovery(dlm, dead_node,
719                                                           dlm->node_num);
720                         spin_unlock(&dlm->spinlock);
721                         mlog(0, "should be done with recovery!\n");
722 
723                         mlog(0, "finishing recovery of %s at %lu, "
724                              "dead=%u, this=%u, new=%u\n", dlm->name,
725                              jiffies, dlm->reco.dead_node,
726                              dlm->node_num, dlm->reco.new_master);
727                         destroy = 1;
728                         status = 0;
729                         /* rescan everything marked dirty along the way */
730                         dlm_kick_thread(dlm, NULL);
731                         break;
732                 }
733                 /* wait to be signalled, with periodic timeout
734                  * to check for node death */
735                 wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
736                                          kthread_should_stop(),
737                                          msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
738 
739         }
740 
741         if (destroy)
742                 dlm_destroy_recovery_area(dlm, dead_node);
743 
744         return status;
745 }
746 
747 static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
748 {
749         int num=0;
750         struct dlm_reco_node_data *ndata;
751 
752         spin_lock(&dlm->spinlock);
753         memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
754         /* nodes can only be removed (by dying) after dropping
755          * this lock, and death will be trapped later, so this should do */
756         spin_unlock(&dlm->spinlock);
757 
758         while (1) {
 759                 num = find_next_bit(dlm->reco.node_map, O2NM_MAX_NODES, num);
760                 if (num >= O2NM_MAX_NODES) {
761                         break;
762                 }
763                 BUG_ON(num == dead_node);
764 
765                 ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
766                 if (!ndata) {
767                         dlm_destroy_recovery_area(dlm, dead_node);
768                         return -ENOMEM;
769                 }
770                 ndata->node_num = num;
771                 ndata->state = DLM_RECO_NODE_DATA_INIT;
772                 spin_lock(&dlm_reco_state_lock);
773                 list_add_tail(&ndata->list, &dlm->reco.node_data);
774                 spin_unlock(&dlm_reco_state_lock);
775                 num++;
776         }
777 
778         return 0;
779 }
780 
781 static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
782 {
783         struct dlm_reco_node_data *ndata, *next;
784         LIST_HEAD(tmplist);
785 
786         spin_lock(&dlm_reco_state_lock);
787         list_splice_init(&dlm->reco.node_data, &tmplist);
788         spin_unlock(&dlm_reco_state_lock);
789 
790         list_for_each_entry_safe(ndata, next, &tmplist, list) {
791                 list_del_init(&ndata->list);
792                 kfree(ndata);
793         }
794 }
795 
796 static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
797                                  u8 dead_node)
798 {
799         struct dlm_lock_request lr;
800         int ret;
801         int status;
802 
803         mlog(0, "\n");
804 
805 
806         mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
807                   "to %u\n", dead_node, request_from);
808 
809         memset(&lr, 0, sizeof(lr));
810         lr.node_idx = dlm->node_num;
811         lr.dead_node = dead_node;
812 
813         // send message
814         ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
815                                  &lr, sizeof(lr), request_from, &status);
816 
817         /* negative status is handled by caller */
818         if (ret < 0)
819                 mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
820                      "to recover dead node %u\n", dlm->name, ret,
821                      request_from, dead_node);
822         else
823                 ret = status;
 824         // return from here; the caller then waits until all lock
 825         // data has been received, or until an error occurs
826         return ret;
827 
828 }
829 
830 int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
831                                   void **ret_data)
832 {
833         struct dlm_ctxt *dlm = data;
834         struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
835         char *buf = NULL;
836         struct dlm_work_item *item = NULL;
837 
838         if (!dlm_grab(dlm))
839                 return -EINVAL;
840 
841         if (lr->dead_node != dlm->reco.dead_node) {
842                 mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
843                      "dead_node is %u\n", dlm->name, lr->node_idx,
844                      lr->dead_node, dlm->reco.dead_node);
845                 dlm_print_reco_node_status(dlm);
 846                 /* this is a hack: returning -ENOMEM makes the requesting node retry */
847                 dlm_put(dlm);
848                 return -ENOMEM;
849         }
850         BUG_ON(lr->dead_node != dlm->reco.dead_node);
851 
852         item = kzalloc(sizeof(*item), GFP_NOFS);
853         if (!item) {
854                 dlm_put(dlm);
855                 return -ENOMEM;
856         }
857 
858         /* this will get freed by dlm_request_all_locks_worker */
859         buf = (char *) __get_free_page(GFP_NOFS);
860         if (!buf) {
861                 kfree(item);
862                 dlm_put(dlm);
863                 return -ENOMEM;
864         }
865 
866         /* queue up work for dlm_request_all_locks_worker */
867         dlm_grab(dlm);  /* get an extra ref for the work item */
868         dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
869         item->u.ral.reco_master = lr->node_idx;
870         item->u.ral.dead_node = lr->dead_node;
871         spin_lock(&dlm->work_lock);
872         list_add_tail(&item->list, &dlm->work_list);
873         spin_unlock(&dlm->work_lock);
874         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
875 
876         dlm_put(dlm);
877         return 0;
878 }
879 
880 static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
881 {
882         struct dlm_migratable_lockres *mres;
883         struct dlm_lock_resource *res;
884         struct dlm_ctxt *dlm;
885         LIST_HEAD(resources);
886         int ret;
887         u8 dead_node, reco_master;
888         int skip_all_done = 0;
889 
890         dlm = item->dlm;
891         dead_node = item->u.ral.dead_node;
892         reco_master = item->u.ral.reco_master;
893         mres = (struct dlm_migratable_lockres *)data;
894 
895         mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
896              dlm->name, dead_node, reco_master);
897 
898         if (dead_node != dlm->reco.dead_node ||
899             reco_master != dlm->reco.new_master) {
900                 /* worker could have been created before the recovery master
901                  * died.  if so, do not continue, but do not error. */
902                 if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
903                         mlog(ML_NOTICE, "%s: will not send recovery state, "
904                              "recovery master %u died, thread=(dead=%u,mas=%u)"
905                              " current=(dead=%u,mas=%u)\n", dlm->name,
906                              reco_master, dead_node, reco_master,
907                              dlm->reco.dead_node, dlm->reco.new_master);
908                 } else {
909                         mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
910                              "master=%u), request(dead=%u, master=%u)\n",
911                              dlm->name, dlm->reco.dead_node,
912                              dlm->reco.new_master, dead_node, reco_master);
913                 }
914                 goto leave;
915         }
916 
917         /* lock resources should have already been moved to the
918          * dlm->reco.resources list.  now move items from that list
919          * to a temp list if the dead owner matches.  note that the
920          * whole cluster recovers only one node at a time, so we
921          * can safely move UNKNOWN lock resources for each recovery
922          * session. */
923         dlm_move_reco_locks_to_list(dlm, &resources, dead_node);
924 
925         /* now we can begin blasting lockreses without the dlm lock */
926 
927         /* any errors returned will be due to the new_master dying,
928          * the dlm_reco_thread should detect this */
929         list_for_each_entry(res, &resources, recovering) {
930                 ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
931                                         DLM_MRES_RECOVERY);
932                 if (ret < 0) {
933                         mlog(ML_ERROR, "%s: node %u went down while sending "
934                              "recovery state for dead node %u, ret=%d\n", dlm->name,
935                              reco_master, dead_node, ret);
936                         skip_all_done = 1;
937                         break;
938                 }
939         }
940 
941         /* move the resources back to the list */
942         spin_lock(&dlm->spinlock);
943         list_splice_init(&resources, &dlm->reco.resources);
944         spin_unlock(&dlm->spinlock);
945 
946         if (!skip_all_done) {
947                 ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
948                 if (ret < 0) {
949                         mlog(ML_ERROR, "%s: node %u went down while sending "
950                              "recovery all-done for dead node %u, ret=%d\n",
951                              dlm->name, reco_master, dead_node, ret);
952                 }
953         }
954 leave:
955         free_page((unsigned long)data);
956 }
957 
958 
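     /* sent by a node to the recovery master once it has finished sending
      * all of its lock state for the dead node (the DATA DONE step) */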
959 static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
960 {
961         int ret, tmpret;
962         struct dlm_reco_data_done done_msg;
963 
964         memset(&done_msg, 0, sizeof(done_msg));
965         done_msg.node_idx = dlm->node_num;
966         done_msg.dead_node = dead_node;
967         mlog(0, "sending DATA DONE message to %u, "
968              "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
969              done_msg.dead_node);
970 
971         ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
972                                  sizeof(done_msg), send_to, &tmpret);
973         if (ret < 0) {
974                 mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
975                      "to recover dead node %u\n", dlm->name, ret, send_to,
976                      dead_node);
977                 if (!dlm_is_host_down(ret)) {
978                         BUG();
979                 }
980         } else
981                 ret = tmpret;
982         return ret;
983 }
984 
985 
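     /* runs on the recovery master when a node reports DATA DONE; marks
      * that node's recovery state as done and wakes the recovery thread */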
986 int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
987                                void **ret_data)
988 {
989         struct dlm_ctxt *dlm = data;
990         struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
991         struct dlm_reco_node_data *ndata = NULL;
992         int ret = -EINVAL;
993 
994         if (!dlm_grab(dlm))
995                 return -EINVAL;
996 
997         mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
998              "node_idx=%u, this node=%u\n", done->dead_node,
999              dlm->reco.dead_node, done->node_idx, dlm->node_num);
1000 
1001         mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
1002                         "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
1003                         "node_idx=%u, this node=%u\n", done->dead_node,
1004                         dlm->reco.dead_node, done->node_idx, dlm->node_num);
1005 
1006         spin_lock(&dlm_reco_state_lock);
1007         list_for_each_entry(ndata, &dlm->reco.node_data, list) {
1008                 if (ndata->node_num != done->node_idx)
1009                         continue;
1010 
1011                 switch (ndata->state) {
1012                         /* should have moved beyond INIT but not to FINALIZE yet */
1013                         case DLM_RECO_NODE_DATA_INIT:
1014                         case DLM_RECO_NODE_DATA_DEAD:
1015                         case DLM_RECO_NODE_DATA_FINALIZE_SENT:
1016                                 mlog(ML_ERROR, "bad ndata state for node %u:"
1017                                      " state=%d\n", ndata->node_num,
1018                                      ndata->state);
1019                                 BUG();
1020                                 break;
1021                         /* these states are possible at this point, anywhere along
1022                          * the line of recovery */
1023                         case DLM_RECO_NODE_DATA_DONE:
1024                         case DLM_RECO_NODE_DATA_RECEIVING:
1025                         case DLM_RECO_NODE_DATA_REQUESTED:
1026                         case DLM_RECO_NODE_DATA_REQUESTING:
1027                                 mlog(0, "node %u is DONE sending "
1028                                           "recovery data!\n",
1029                                           ndata->node_num);
1030 
1031                                 ndata->state = DLM_RECO_NODE_DATA_DONE;
1032                                 ret = 0;
1033                                 break;
1034                 }
1035         }
1036         spin_unlock(&dlm_reco_state_lock);
1037 
1038         /* wake the recovery thread, some node is done */
1039         if (!ret)
1040                 dlm_kick_recovery_thread(dlm);
1041 
1042         if (ret < 0)
1043                 mlog(ML_ERROR, "failed to find recovery node data for node "
1044                      "%u\n", done->node_idx);
1045         dlm_put(dlm);
1046 
1047         mlog(0, "leaving reco data done handler, ret=%d\n", ret);
1048         return ret;
1049 }
1050 
1051 static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
1052                                         struct list_head *list,
1053                                         u8 dead_node)
1054 {
1055         struct dlm_lock_resource *res, *next;
1056         struct dlm_lock *lock;
1057 
1058         spin_lock(&dlm->spinlock);
1059         list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
1060                 /* always prune any $RECOVERY entries for dead nodes,
1061                  * otherwise hangs can occur during later recovery */
1062                 if (dlm_is_recovery_lock(res->lockname.name,
1063                                          res->lockname.len)) {
1064                         spin_lock(&res->spinlock);
1065                         list_for_each_entry(lock, &res->granted, list) {
1066                                 if (lock->ml.node == dead_node) {
1067                                         mlog(0, "AHA! there was "
1068                                              "a $RECOVERY lock for dead "
1069                                              "node %u (%s)!\n",
1070                                              dead_node, dlm->name);
1071                                         list_del_init(&lock->list);
1072                                         dlm_lock_put(lock);
1073                                         break;
1074                                 }
1075                         }
1076                         spin_unlock(&res->spinlock);
1077                         continue;
1078                 }
1079 
1080                 if (res->owner == dead_node) {
1081                         mlog(0, "found lockres owned by dead node while "
1082                                   "doing recovery for node %u. sending it.\n",
1083                                   dead_node);
1084                         list_move_tail(&res->recovering, list);
1085                 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
1086                         mlog(0, "found UNKNOWN owner while doing recovery "
1087                                   "for node %u. sending it.\n", dead_node);
1088                         list_move_tail(&res->recovering, list);
1089                 }
1090         }
1091         spin_unlock(&dlm->spinlock);
1092 }
1093 
1094 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
1095 {
1096         int total_locks = 0;
1097         struct list_head *iter, *queue = &res->granted;
1098         int i;
1099 
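             /* granted, converting and blocked are assumed to be laid out
              * consecutively in struct dlm_lock_resource, so queue++ steps
              * through all three queues */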
1100         for (i=0; i<3; i++) {
1101                 list_for_each(iter, queue)
1102                         total_locks++;
1103                 queue++;
1104         }
1105         return total_locks;
1106 }
1107 
1108 
1109 static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
1110                                       struct dlm_migratable_lockres *mres,
1111                                       u8 send_to,
1112                                       struct dlm_lock_resource *res,
1113                                       int total_locks)
1114 {
1115         u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
1116         int mres_total_locks = be32_to_cpu(mres->total_locks);
1117         int sz, ret = 0, status = 0;
1118         u8 orig_flags = mres->flags,
1119            orig_master = mres->master;
1120 
1121         BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
1122         if (!mres->num_locks)
1123                 return 0;
1124 
1125         sz = sizeof(struct dlm_migratable_lockres) +
1126                 (mres->num_locks * sizeof(struct dlm_migratable_lock));
1127 
1128         /* add an all-done flag if we reached the last lock */
1129         orig_flags = mres->flags;
1130         BUG_ON(total_locks > mres_total_locks);
1131         if (total_locks == mres_total_locks)
1132                 mres->flags |= DLM_MRES_ALL_DONE;
1133 
1134         mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
1135              dlm->name, res->lockname.len, res->lockname.name,
1136              orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
1137              send_to);
1138 
1139         /* send it */
1140         ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
1141                                  sz, send_to, &status);
1142         if (ret < 0) {
1143                 /* XXX: negative status is not handled.
1144                  * this will end up killing this node. */
1145                 mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
1146                      "node %u (%s)\n", dlm->name, mres->lockname_len,
1147                      mres->lockname, ret, send_to,
1148                      (orig_flags & DLM_MRES_MIGRATION ?
1149                       "migration" : "recovery"));
1150         } else {
1151                 /* might get an -ENOMEM back here */
1152                 ret = status;
1153                 if (ret < 0) {
1154                         mlog_errno(ret);
1155 
1156                         if (ret == -EFAULT) {
1157                                 mlog(ML_ERROR, "node %u told me to kill "
1158                                      "myself!\n", send_to);
1159                                 BUG();
1160                         }
1161                 }
1162         }
1163 
1164         /* zero and reinit the message buffer */
1165         dlm_init_migratable_lockres(mres, res->lockname.name,
1166                                     res->lockname.len, mres_total_locks,
1167                                     mig_cookie, orig_flags, orig_master);
1168         return ret;
1169 }
1170 
1171 static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
1172                                         const char *lockname, int namelen,
1173                                         int total_locks, u64 cookie,
1174                                         u8 flags, u8 master)
1175 {
1176         /* mres here is one full page */
1177         clear_page(mres);
1178         mres->lockname_len = namelen;
1179         memcpy(mres->lockname, lockname, namelen);
1180         mres->num_locks = 0;
1181         mres->total_locks = cpu_to_be32(total_locks);
1182         mres->mig_cookie = cpu_to_be64(cookie);
1183         mres->flags = flags;
1184         mres->master = master;
1185 }
1186 
1187 static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
1188                                           struct dlm_migratable_lockres *mres,
1189                                           int queue)
1190 {
1191         if (!lock->lksb)
1192                return;
1193 
1194         /* Ignore lvb in all locks in the blocked list */
1195         if (queue == DLM_BLOCKED_LIST)
1196                 return;
1197 
1198         /* Only consider lvbs in locks with granted EX or PR lock levels */
1199         if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
1200                 return;
1201 
1202         if (dlm_lvb_is_empty(mres->lvb)) {
1203                 memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
1204                 return;
1205         }
1206 
1207         /* Ensure the lvb copied for migration matches in other valid locks */
1208         if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
1209                 return;
1210 
1211         mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
1212              "node=%u\n",
1213              dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
1214              dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
1215              lock->lockres->lockname.len, lock->lockres->lockname.name,
1216              lock->ml.node);
1217         dlm_print_one_lock_resource(lock->lockres);
1218         BUG();
1219 }
1220 
1221 /* returns 1 if this lock fills the network structure,
1222  * 0 otherwise */
1223 static int dlm_add_lock_to_array(struct dlm_lock *lock,
1224                                  struct dlm_migratable_lockres *mres, int queue)
1225 {
1226         struct dlm_migratable_lock *ml;
1227         int lock_num = mres->num_locks;
1228 
1229         ml = &(mres->ml[lock_num]);
1230         ml->cookie = lock->ml.cookie;
1231         ml->type = lock->ml.type;
1232         ml->convert_type = lock->ml.convert_type;
1233         ml->highest_blocked = lock->ml.highest_blocked;
1234         ml->list = queue;
1235         if (lock->lksb) {
1236                 ml->flags = lock->lksb->flags;
1237                 dlm_prepare_lvb_for_migration(lock, mres, queue);
1238         }
1239         ml->node = lock->ml.node;
1240         mres->num_locks++;
1241         /* we reached the max, send this network message */
1242         if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
1243                 return 1;
1244         return 0;
1245 }
1246 
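     /* a dummy lock carries no real lock state; it only tells the new
      * master that this node holds a reference on the resource (see
      * dlm_is_dummy_lock() for how it is recognized on the receiving end) */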
1247 static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
1248                                struct dlm_migratable_lockres *mres)
1249 {
1250         struct dlm_lock dummy;
1251         memset(&dummy, 0, sizeof(dummy));
1252         dummy.ml.cookie = 0;
1253         dummy.ml.type = LKM_IVMODE;
1254         dummy.ml.convert_type = LKM_IVMODE;
1255         dummy.ml.highest_blocked = LKM_IVMODE;
1256         dummy.lksb = NULL;
1257         dummy.ml.node = dlm->node_num;
1258         dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
1259 }
1260 
1261 static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
1262                                     struct dlm_migratable_lock *ml,
1263                                     u8 *nodenum)
1264 {
1265         if (unlikely(ml->cookie == 0 &&
1266             ml->type == LKM_IVMODE &&
1267             ml->convert_type == LKM_IVMODE &&
1268             ml->highest_blocked == LKM_IVMODE &&
1269             ml->list == DLM_BLOCKED_LIST)) {
1270                 *nodenum = ml->node;
1271                 return 1;
1272         }
1273         return 0;
1274 }
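
/*
 * Illustrative sketch (not part of the original file): the dummy lock
 * is an in-band sentinel -- a field combination (zero cookie, invalid
 * modes, blocked list) that no real lock can have, letting the receiver
 * distinguish "set the refmap bit only" from genuine lock state without
 * any extra flag in the message header.  Stand-in types below.
 */
struct wire_lock {
	unsigned long long cookie;
	int type, convert_type, highest_blocked, list;
};
#define IVMODE	(-1)
#define BLOCKED	2

static int is_sentinel(const struct wire_lock *ml)
{
	return ml->cookie == 0 && ml->type == IVMODE &&
	       ml->convert_type == IVMODE &&
	       ml->highest_blocked == IVMODE && ml->list == BLOCKED;
}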
1275 
1276 int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1277                          struct dlm_migratable_lockres *mres,
1278                          u8 send_to, u8 flags)
1279 {
1280         struct list_head *queue;
1281         int total_locks, i;
1282         u64 mig_cookie = 0;
1283         struct dlm_lock *lock;
1284         int ret = 0;
1285 
1286         BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1287 
1288         mlog(0, "sending to %u\n", send_to);
1289 
1290         total_locks = dlm_num_locks_in_lockres(res);
1291         if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
1292                 /* rare, but possible */
1293                 mlog(0, "argh.  lockres has %d locks.  this will "
1294                           "require more than one network packet to "
1295                           "migrate\n", total_locks);
1296                 mig_cookie = dlm_get_next_mig_cookie();
1297         }
1298 
1299         dlm_init_migratable_lockres(mres, res->lockname.name,
1300                                     res->lockname.len, total_locks,
1301                                     mig_cookie, flags, res->owner);
1302 
1303         total_locks = 0;
1304         for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
1305                 queue = dlm_list_idx_to_ptr(res, i);
1306                 list_for_each_entry(lock, queue, list) {
1307                         /* add another lock. */
1308                         total_locks++;
1309                         if (!dlm_add_lock_to_array(lock, mres, i))
1310                                 continue;
1311 
1312                         /* this filled the lock message,
1313                          * we must send it immediately. */
1314                         ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
1315                                                        res, total_locks);
1316                         if (ret < 0)
1317                                 goto error;
1318                 }
1319         }
1320         if (total_locks == 0) {
1321                 /* send a dummy lock to indicate a mastery reference only */
1322                 mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
1323                      dlm->name, res->lockname.len, res->lockname.name,
1324                      send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
1325                      "migration");
1326                 dlm_add_dummy_lock(dlm, mres);
1327         }
1328         /* flush any remaining locks */
1329         ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
1330         if (ret < 0)
1331                 goto error;
1332         return ret;
1333 
1334 error:
1335         mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
1336              dlm->name, ret);
1337         if (!dlm_is_host_down(ret))
1338                 BUG();
1339         mlog(0, "%s: node %u went down while sending %s "
1340              "lockres %.*s\n", dlm->name, send_to,
1341              flags & DLM_MRES_RECOVERY ?  "recovery" : "migration",
1342              res->lockname.len, res->lockname.name);
1343         return ret;
1344 }
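
/*
 * Illustrative sketch (not part of the original file): the chunked-send
 * shape of dlm_send_one_lockres().  When a resource holds more locks
 * than one message can carry, every chunk is tagged with the same
 * migration cookie so the receiver can stitch them back together; an
 * empty resource still produces one message (the dummy-lock case).
 * send_chunk() is a hypothetical transport callback, and the final
 * flush message is folded into the loop here for brevity.
 */
static int send_all(int nlocks, unsigned long long mig_cookie,
		    int (*send_chunk)(unsigned long long cookie,
				      int first, int count))
{
	enum { CHUNK_MAX = 8 };	/* stand-in for DLM_MAX_MIGRATABLE_LOCKS */
	int sent = 0, ret;

	do {
		int n = nlocks - sent;
		if (n > CHUNK_MAX)
			n = CHUNK_MAX;
		ret = send_chunk(mig_cookie, sent, n);
		if (ret < 0)
			return ret;	/* only a host-down error is survivable */
		sent += n;
	} while (sent < nlocks);
	return 0;
}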
1345 
1346 
1347 
1348 /*
1349  * this message will contain no more than one page worth of
1350  * recovery data, and it will work on only one lockres.
1351  * there may be many locks in this page, and we may need to wait
1352  * for additional packets to complete all the locks (rare, but
1353  * possible).
1354  */
1355 /*
1356  * NOTE: the allocation error cases here are scary.
1357  * we really cannot afford to fail an alloc in recovery.
1358  * do we spin?  returning an error only delays the problem, really.
1359  */
1360 
1361 int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
1362                             void **ret_data)
1363 {
1364         struct dlm_ctxt *dlm = data;
1365         struct dlm_migratable_lockres *mres =
1366                 (struct dlm_migratable_lockres *)msg->buf;
1367         int ret = 0;
1368         u8 real_master;
1369         u8 extra_refs = 0;
1370         char *buf = NULL;
1371         struct dlm_work_item *item = NULL;
1372         struct dlm_lock_resource *res = NULL;
1373 
1374         if (!dlm_grab(dlm))
1375                 return -EINVAL;
1376 
1377         BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));
1378 
1379         real_master = mres->master;
1380         if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1381                 /* cannot migrate a lockres with no master */
1382                 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1383         }
1384 
1385         mlog(0, "%s message received from node %u\n",
1386                   (mres->flags & DLM_MRES_RECOVERY) ?
1387                   "recovery" : "migration", mres->master);
1388         if (mres->flags & DLM_MRES_ALL_DONE)
1389                 mlog(0, "all done flag.  all lockres data received!\n");
1390 
1391         ret = -ENOMEM;
1392         buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
1393         item = kzalloc(sizeof(*item), GFP_NOFS);
1394         if (!buf || !item)
1395                 goto leave;
1396 
1397         /* look up the lockres to see if we have a secondary queue for this
1398          * already...  just add the locks in and this will have its owner
1399          * and RECOVERY flag changed when it completes. */
1400         res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
1401         if (res) {
1402                 /* this will get a ref on res */
1403                 /* mark it as recovering/migrating and hash it */
1404                 spin_lock(&res->spinlock);
1405                 if (mres->flags & DLM_MRES_RECOVERY) {
1406                         res->state |= DLM_LOCK_RES_RECOVERING;
1407                 } else {
1408                         if (res->state & DLM_LOCK_RES_MIGRATING) {
1409                                 /* this is at least the second
1410                                  * lockres message */
1411                                 mlog(0, "lock %.*s is already migrating\n",
1412                                           mres->lockname_len,
1413                                           mres->lockname);
1414                         } else if (res->state & DLM_LOCK_RES_RECOVERING) {
1415                                 /* caller should BUG */
1416                                 mlog(ML_ERROR, "node is attempting to migrate "
1417                                      "lock %.*s, but marked as recovering!\n",
1418                                      mres->lockname_len, mres->lockname);
1419                                 ret = -EFAULT;
1420                                 spin_unlock(&res->spinlock);
1421                                 dlm_lockres_put(res);
1422                                 goto leave;
1423                         }
1424                         res->state |= DLM_LOCK_RES_MIGRATING;
1425                 }
1426                 spin_unlock(&res->spinlock);
1427         } else {
1428                 /* need to allocate, just as if it were
1429                  * mastered here normally */
1430                 res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
1431                 if (!res)
1432                         goto leave;
1433 
1434                 /* to match the ref that we would have gotten if
1435                  * dlm_lookup_lockres had succeeded */
1436                 dlm_lockres_get(res);
1437 
1438                 /* mark it as recovering/migrating and hash it */
1439                 if (mres->flags & DLM_MRES_RECOVERY)
1440                         res->state |= DLM_LOCK_RES_RECOVERING;
1441                 else
1442                         res->state |= DLM_LOCK_RES_MIGRATING;
1443 
1444                 spin_lock(&dlm->spinlock);
1445                 __dlm_insert_lockres(dlm, res);
1446                 spin_unlock(&dlm->spinlock);
1447 
1448                 /* Add an extra ref for this lock-less lockres lest the
1449                  * dlm_thread purge it before we get the chance to add
1450                  * locks to it */
1451                 dlm_lockres_get(res);
1452 
1453                 /* There are three refs that need to be put.
1454                  * 1. Taken above.
1455                  * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
1456                  * 3. dlm_lookup_lockres()
1457                  * The first one is handled at the end of this function. The
1458                  * other two are handled in the worker thread after locks have
1459                  * been attached. Yes, we don't wait for purge time to match
1460                  * kref_init. The lockres will still have at least one ref
1461                  * because it is in the hash via __dlm_insert_lockres(). */
1462                 extra_refs++;
1463 
1464                 /* now that the new lockres is inserted,
1465                  * make it usable by other processes */
1466                 spin_lock(&res->spinlock);
1467                 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1468                 spin_unlock(&res->spinlock);
1469                 wake_up(&res->wq);
1470         }
1471 
1472         /* at this point we have allocated everything we need,
1473          * and we have a hashed lockres with an extra ref and
1474          * the proper res->state flags. */
1475         ret = 0;
1476         spin_lock(&res->spinlock);
1477         /* drop this either when master requery finds a different master
1478          * or when a lock is added by the recovery worker */
1479         dlm_lockres_grab_inflight_ref(dlm, res);
1480         if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1481                 /* migration cannot have an unknown master */
1482                 BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
1483                 mlog(0, "recovery has passed me a lockres with an "
1484                           "unknown owner.. will need to requery: "
1485                           "%.*s\n", mres->lockname_len, mres->lockname);
1486         } else {
1487                 /* take a reference now to pin the lockres, drop it
1488                  * when locks are added in the worker */
1489                 dlm_change_lockres_owner(dlm, res, dlm->node_num);
1490         }
1491         spin_unlock(&res->spinlock);
1492 
1493         /* queue up work for dlm_mig_lockres_worker */
1494         dlm_grab(dlm);  /* get an extra ref for the work item */
1495         memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
1496         dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
1497         item->u.ml.lockres = res; /* already have a ref */
1498         item->u.ml.real_master = real_master;
1499         item->u.ml.extra_ref = extra_refs;
1500         spin_lock(&dlm->work_lock);
1501         list_add_tail(&item->list, &dlm->work_list);
1502         spin_unlock(&dlm->work_lock);
1503         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
1504 
1505 leave:
1506         /* The extra ref taken above needs to be put here */
1507         if (extra_refs)
1508                 dlm_lockres_put(res);
1509 
1510         dlm_put(dlm);
1511         if (ret < 0) {
1512                 kfree(buf);
1513                 kfree(item);
1514                 mlog_errno(ret);
1515         }
1516 
1517         return ret;
1518 }
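
/*
 * Illustrative sketch (not part of the original file): the refcount
 * discipline of the new-lockres path above, with a plain counter in
 * place of a kref.  One ref comes from creation, one mirrors the ref a
 * successful lookup would have returned, and one extra ref keeps a
 * concurrent purger from freeing the object before the worker attaches
 * locks to it; the worker drops the latter two when it finishes.
 */
#include <stdlib.h>

struct obj { int refs; };

static struct obj *obj_new(void)		/* like kref_init: one ref */
{
	struct obj *o = malloc(sizeof(*o));
	if (o)
		o->refs = 1;
	return o;
}

static void obj_get(struct obj *o) { o->refs++; }

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		free(o);
}

static void publish(struct obj *o)
{
	obj_get(o);	/* stands in for the lookup ref */
	obj_get(o);	/* extra ref against an early purge */
}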
1519 
1520 
1521 static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
1522 {
1523         struct dlm_ctxt *dlm;
1524         struct dlm_migratable_lockres *mres;
1525         int ret = 0;
1526         struct dlm_lock_resource *res;
1527         u8 real_master;
1528         u8 extra_ref;
1529 
1530         dlm = item->dlm;
1531         mres = (struct dlm_migratable_lockres *)data;
1532 
1533         res = item->u.ml.lockres;
1534         real_master = item->u.ml.real_master;
1535         extra_ref = item->u.ml.extra_ref;
1536 
1537         if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1538                 /* this case is super-rare. only occurs if
1539                  * node death happens during migration. */
1540 again:
1541                 ret = dlm_lockres_master_requery(dlm, res, &real_master);
1542                 if (ret < 0) {
1543                         mlog(0, "dlm_lockres_master_requery ret=%d\n",
1544                                   ret);
1545                         goto again;
1546                 }
1547                 if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
1548                         mlog(0, "lockres %.*s not claimed.  "
1549                                    "this node will take it.\n",
1550                                    res->lockname.len, res->lockname.name);
1551                 } else {
1552                         spin_lock(&res->spinlock);
1553                         dlm_lockres_drop_inflight_ref(dlm, res);
1554                         spin_unlock(&res->spinlock);
1555                         mlog(0, "master needs to respond to sender "
1556                                   "that node %u still owns %.*s\n",
1557                                   real_master, res->lockname.len,
1558                                   res->lockname.name);
1559                         /* cannot touch this lockres */
1560                         goto leave;
1561                 }
1562         }
1563 
1564         ret = dlm_process_recovery_data(dlm, res, mres);
1565         if (ret < 0)
1566                 mlog(0, "dlm_process_recovery_data returned %d\n", ret);
1567         else
1568                 mlog(0, "dlm_process_recovery_data succeeded\n");
1569 
1570         if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
1571                            (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
1572                 ret = dlm_finish_migration(dlm, res, mres->master);
1573                 if (ret < 0)
1574                         mlog_errno(ret);
1575         }
1576 
1577 leave:
1578         /* See comment in dlm_mig_lockres_handler() */
1579         if (res) {
1580                 if (extra_ref)
1581                         dlm_lockres_put(res);
1582                 dlm_lockres_put(res);
1583         }
1584         kfree(data);
1585 }
1586 
1587 
1588 
1589 static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
1590                                       struct dlm_lock_resource *res,
1591                                       u8 *real_master)
1592 {
1593         struct dlm_node_iter iter;
1594         int nodenum;
1595         int ret = 0;
1596 
1597         *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;
1598 
1599         /* we only reach here if one of the two nodes in a
1600          * migration died while the migration was in progress.
1601          * at this point we need to requery the master.  we
1602          * know that the new_master got as far as creating
1603          * an mle on at least one node, but we do not know
1604          * if any nodes had actually cleared the mle and set
1605          * the master to the new_master.  the old master
1606          * is supposed to set the owner to UNKNOWN in the
1607          * event of a new_master death, so the only possible
1608          * responses that we can get from nodes here are
1609          * that the master is new_master, or that the master
1610          * is UNKNOWN.
1611          * if all nodes come back with UNKNOWN then we know
1612          * the lock needs remastering here.
1613          * if any node comes back with a valid master, check
1614          * to see if that master is the one that we are
1615          * recovering.  if so, then the new_master died and
1616          * we need to remaster this lock.  if not, then the
1617          * new_master survived and that node will respond to
1618          * other nodes about the owner.
1619          * if there is an owner, this node needs to dump this
1620          * lockres and alert the sender that this lockres
1621          * was rejected. */
1622         spin_lock(&dlm->spinlock);
1623         dlm_node_iter_init(dlm->domain_map, &iter);
1624         spin_unlock(&dlm->spinlock);
1625 
1626         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
1627                 /* do not send to self */
1628                 if (nodenum == dlm->node_num)
1629                         continue;
1630                 ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1631                 if (ret < 0) {
1632                         mlog_errno(ret);
1633                         if (!dlm_is_host_down(ret))
1634                                 BUG();
1635                         /* host is down, so answer for that node would be
1636                          * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
1637                 }
1638                 if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1639                         mlog(0, "lock master is %u\n", *real_master);
1640                         break;
1641                 }
1642         }
1643         return ret;
1644 }
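
/*
 * Illustrative sketch (not part of the original file): the requery loop
 * shape above -- ask every node except ourselves, treat an unreachable
 * node's answer as UNKNOWN, and stop at the first definite owner.
 * ask() is a hypothetical RPC stand-in for dlm_do_master_requery(),
 * and OWNER_UNKNOWN is a stand-in constant.
 */
#define OWNER_UNKNOWN 255

static void find_master(int self, int nnodes,
			int (*ask)(int node, unsigned char *owner),
			unsigned char *owner)
{
	*owner = OWNER_UNKNOWN;
	for (int node = 0; node < nnodes; node++) {
		if (node == self)
			continue;		/* never query ourselves */
		(void)ask(node, owner);		/* error == answer stays UNKNOWN */
		if (*owner != OWNER_UNKNOWN)
			break;			/* first definite answer wins */
	}
}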
1645 
1646 
1647 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1648                           u8 nodenum, u8 *real_master)
1649 {
1650         int ret = -EINVAL;
1651         struct dlm_master_requery req;
1652         int status = DLM_LOCK_RES_OWNER_UNKNOWN;
1653 
1654         memset(&req, 0, sizeof(req));
1655         req.node_idx = dlm->node_num;
1656         req.namelen = res->lockname.len;
1657         memcpy(req.name, res->lockname.name, res->lockname.len);
1658 
1659         ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
1660                                  &req, sizeof(req), nodenum, &status);
1661         /* XXX: negative status not handled properly here. */
1662         if (ret < 0)
1663                 mlog(ML_ERROR, "Error %d when sending message %u (key "
1664                      "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
1665                      dlm->key, nodenum);
1666         else {
1667                 BUG_ON(status < 0);
1668                 BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
1669                 *real_master = (u8) (status & 0xff);
1670                 mlog(0, "node %u responded to master requery with %u\n",
1671                           nodenum, *real_master);
1672                 ret = 0;
1673         }
1674         return ret;
1675 }
1676 
1677 
1678 /* this function cannot error, so unless the sending
1679  * or receiving of the message failed, the owner can
1680  * be trusted */
1681 int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1682                                void **ret_data)
1683 {
1684         struct dlm_ctxt *dlm = data;
1685         struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
1686         struct dlm_lock_resource *res = NULL;
1687         unsigned int hash;
1688         int master = DLM_LOCK_RES_OWNER_UNKNOWN;
1689         u32 flags = DLM_ASSERT_MASTER_REQUERY;
1690 
1691         if (!dlm_grab(dlm)) {
1692                 /* since the domain has gone away on this
1693                  * node, the proper response is UNKNOWN */
1694                 return master;
1695         }
1696 
1697         hash = dlm_lockid_hash(req->name, req->namelen);
1698 
1699         spin_lock(&dlm->spinlock);
1700         res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
1701         if (res) {
1702                 spin_lock(&res->spinlock);
1703                 master = res->owner;
1704                 if (master == dlm->node_num) {
1705                         int ret = dlm_dispatch_assert_master(dlm, res,
1706                                                              0, 0, flags);
1707                         if (ret < 0) {
1708                                 mlog_errno(-ENOMEM);
1709                                 /* retry!? */
1710                                 BUG();
1711                         }
1712                 } else /* put.. in case we are not the master */
1713                         dlm_lockres_put(res);
1714                 spin_unlock(&res->spinlock);
1715         }
1716         spin_unlock(&dlm->spinlock);
1717 
1718         dlm_put(dlm);
1719         return master;
1720 }
1721 
1722 static inline struct list_head *
1723 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1724 {
1725         struct list_head *ret;
1726         BUG_ON(list_num < 0);
1727         BUG_ON(list_num > 2);
1728         ret = &(res->granted);
1729         ret += list_num;
1730         return ret;
1731 }
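
/*
 * Illustrative sketch (not part of the original file): why the pointer
 * arithmetic in dlm_list_num_to_pointer() works.  It relies entirely on
 * the three queue heads being declared back-to-back inside the resource
 * struct, so that &granted + n steps across them in declaration order.
 * Stand-in types below; reordering the members would silently break it.
 */
#include <assert.h>

struct head { struct head *next, *prev; };

struct resource {
	struct head granted;	/* index 0 */
	struct head converting;	/* index 1 -- must stay adjacent */
	struct head blocked;	/* index 2 */
};

static struct head *queue_by_index(struct resource *r, int n)
{
	assert(n >= 0 && n <= 2);
	return &r->granted + n;	/* steps over adjacent members */
}
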
1732 /* TODO: do ast flush business
1733  * TODO: do MIGRATING and RECOVERING spinning
1734  */
1735 
1736 /*
1737 * NOTE about in-flight requests during migration:
1738 *
1739 * Before attempting the migrate, the master has marked the lockres as
1740 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
1741 * requests either got queued before the MIGRATING flag got set, in which
1742 * case the lock data will reflect the change and a return message is on
1743 * the way, or the request failed to get in before MIGRATING got set.  In
1744 * this case, the caller will be told to spin and wait for the MIGRATING
1745 * flag to be dropped, then recheck the master.
1746 * This holds true for the convert, cancel and unlock cases, and since lvb
1747 * updates are tied to these same messages, it applies to lvb updates as
1748 * well.  For the lock case, there is no way a lock can be on the master
1749 * queue and not be on the secondary queue since the lock is always added
1750 * locally first.  This means that the new target node will never be sent
1751 * a lock that he doesn't already have on the list.
1752 * In total, this means that the local lock is correct and should not be
1753 * updated to match the one sent by the master.  Any messages sent back
1754 * from the master before the MIGRATING flag will bring the lock properly
1755 * up-to-date, and the change will be ordered properly for the waiter.
1756 * We will *not* attempt to modify the lock underneath the waiter.
1757 */
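
/*
 * Illustrative sketch (not part of the original file): the MIGRATING
 * gate described in the NOTE above.  A request either got in before the
 * flag was set (so its effect is already reflected in the migrated lock
 * data) or it finds the flag and is told to spin, re-checking the
 * master once the flag drops.  Names below are hypothetical.
 */
enum req_status { REQ_OK, REQ_RETRY_LATER };

static enum req_status admit_request(int res_is_migrating)
{
	if (res_is_migrating)
		return REQ_RETRY_LATER;	/* spin, then re-query the master */
	return REQ_OK;			/* lands before the migration snapshot */
}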
1758 
1759 static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
1760                                      struct dlm_lock_resource *res,
1761                                      struct dlm_migratable_lockres *mres)
1762 {
1763         struct dlm_migratable_lock *ml;
1764         struct list_head *queue, *iter;
1765         struct list_head *tmpq = NULL;
1766         struct dlm_lock *newlock = NULL;
1767         struct dlm_lockstatus *lksb = NULL;
1768         int ret = 0;
1769         int i, j, bad;
1770         struct dlm_lock *lock;
1771         u8 from = O2NM_MAX_NODES;
1772         unsigned int added = 0;
1773         __be64 c;
1774 
1775         mlog(0, "running %d locks for this lockres\n", mres->num_locks);
1776         for (i=0; i<mres->num_locks; i++) {
1777                 ml = &(mres->ml[i]);
1778 
1779                 if (dlm_is_dummy_lock(dlm, ml, &from)) {
1780                         /* placeholder, just need to set the refmap bit */
1781                         BUG_ON(mres->num_locks != 1);
1782                         mlog(0, "%s:%.*s: dummy lock for %u\n",
1783                              dlm->name, mres->lockname_len, mres->lockname,
1784                              from);
1785                         spin_lock(&res->spinlock);
1786                         dlm_lockres_set_refmap_bit(dlm, res, from);
1787                         spin_unlock(&res->spinlock);
1788                         added++;
1789                         break;
1790                 }
1791                 BUG_ON(ml->highest_blocked != LKM_IVMODE);
1792                 newlock = NULL;
1793                 lksb = NULL;
1794 
1795                 queue = dlm_list_num_to_pointer(res, ml->list);
1796                 tmpq = NULL;
1797 
1798                 /* if the lock is for the local node it needs to
1799                  * be moved to the proper location within the queue.
1800                  * do not allocate a new lock structure. */
1801                 if (ml->node == dlm->node_num) {
1802                         /* MIGRATION ONLY! */
1803                         BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));
1804 
1805                         lock = NULL;
1806                         spin_lock(&res->spinlock);
1807                         for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
1808                                 tmpq = dlm_list_idx_to_ptr(res, j);
1809                                 list_for_each(iter, tmpq) {
1810                                         lock = list_entry(iter,
1811                                                   struct dlm_lock, list);
1812                                         if (lock->ml.cookie == ml->cookie)
1813                                                 break;
1814                                         lock = NULL;
1815                                 }
1816                                 if (lock)
1817                                         break;
1818                         }
1819 
1820                         /* lock is always created locally first, and
1821                          * destroyed locally last.  it must be on the list */
1822                         if (!lock) {
1823                                 c = ml->cookie;
1824                                 mlog(ML_ERROR, "Could not find local lock "
1825                                                "with cookie %u:%llu, node %u, "
1826                                                "list %u, flags 0x%x, type %d, "
1827                                                "conv %d, highest blocked %d\n",
1828                                      dlm_get_lock_cookie_node(be64_to_cpu(c)),
1829                                      dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1830                                      ml->node, ml->list, ml->flags, ml->type,
1831                                      ml->convert_type, ml->highest_blocked);
1832                                 __dlm_print_one_lock_resource(res);
1833                                 BUG();
1834                         }
1835 
1836                         if (lock->ml.node != ml->node) {
1837                                 c = lock->ml.cookie;
1838                                 mlog(ML_ERROR, "Mismatched node# in lock "
1839                                      "cookie %u:%llu, name %.*s, node %u\n",
1840                                      dlm_get_lock_cookie_node(be64_to_cpu(c)),
1841                                      dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1842                                      res->lockname.len, res->lockname.name,
1843                                      lock->ml.node);
1844                                 c = ml->cookie;
1845                                 mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
1846                                      "node %u, list %u, flags 0x%x, type %d, "
1847                                      "conv %d, highest blocked %d\n",
1848                                      dlm_get_lock_cookie_node(be64_to_cpu(c)),
1849                                      dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1850                                      ml->node, ml->list, ml->flags, ml->type,
1851                                      ml->convert_type, ml->highest_blocked);
1852                                 __dlm_print_one_lock_resource(res);
1853                                 BUG();
1854                         }
1855 
1856                         if (tmpq != queue) {
1857                                 c = ml->cookie;
1858                                 mlog(0, "Lock cookie %u:%llu was on list %u "
1859                                      "instead of list %u for %.*s\n",
1860                                      dlm_get_lock_cookie_node(be64_to_cpu(c)),
1861                                      dlm_get_lock_cookie_seq(be64_to_cpu(c)),
1862                                      j, ml->list, res->lockname.len,
1863                                      res->lockname.name);
1864                                 __dlm_print_one_lock_resource(res);
1865                                 spin_unlock(&res->spinlock);
1866                                 continue;
1867                         }
1868 
1869                         /* see NOTE above about why we do not update
1870                          * to match the master here */
1871 
1872                         /* move the lock to its proper place */
1873                         /* do not alter lock refcount.  switching lists. */
1874                         list_move_tail(&lock->list, queue);
1875                         spin_unlock(&res->spinlock);
1876                         added++;
1877 
1878                         mlog(0, "just reordered a local lock!\n");
1879                         continue;
1880                 }
1881 
1882                 /* lock is for another node. */
1883                 newlock = dlm_new_lock(ml->type, ml->node,
1884                                        be64_to_cpu(ml->cookie), NULL);
1885                 if (!newlock) {
1886                         ret = -ENOMEM;
1887                         goto leave;
1888                 }
1889                 lksb = newlock->lksb;
1890                 dlm_lock_attach_lockres(newlock, res);
1891 
1892                 if (ml->convert_type != LKM_IVMODE) {
1893                         BUG_ON(queue != &res->converting);
1894                         newlock->ml.convert_type = ml->convert_type;
1895                 }
1896                 lksb->flags |= (ml->flags &
1897                                 (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));
1898 
1899                 if (ml->type == LKM_NLMODE)
1900                         goto skip_lvb;
1901 
1902                 /*
1903                  * If the lock is in the blocked list it can't have a valid lvb,
1904                  * so skip it
1905                  */
1906                 if (ml->list == DLM_BLOCKED_LIST)
1907                         goto skip_lvb;
1908 
1909                 if (!dlm_lvb_is_empty(mres->lvb)) {
1910                         if (lksb->flags & DLM_LKSB_PUT_LVB) {
1911                                 /* other node was trying to update
1912                                  * lvb when node died.  recreate the
1913                                  * lksb with the updated lvb. */
1914                                 memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
1915                                 /* the lock resource lvb update must happen
1916                                  * NOW, before the spinlock is dropped.
1917                                  * we no longer wait for the AST to update
1918                                  * the lvb. */
1919                                 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1920                         } else {
1921                                 /* otherwise, the node is sending its
1922                                  * most recent valid lvb info */
1923                                 BUG_ON(ml->type != LKM_EXMODE &&
1924                                        ml->type != LKM_PRMODE);
1925                                 if (!dlm_lvb_is_empty(res->lvb) &&
1926                                     (ml->type == LKM_EXMODE ||
1927                                      memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1928                                         int i;
1929                                         mlog(ML_ERROR, "%s:%.*s: received bad "
1930                                              "lvb! type=%d\n", dlm->name,
1931                                              res->lockname.len,
1932                                              res->lockname.name, ml->type);
1933                                         printk("lockres lvb=[");
1934                                         for (i=0; i<DLM_LVB_LEN; i++)
1935                                                 printk("%02x", res->lvb[i]);
1936                                         printk("]\nmigrated lvb=[");
1937                                         for (i=0; i<DLM_LVB_LEN; i++)
1938                                                 printk("%02x", mres->lvb[i]);
1939                                         printk("]\n");
1940                                         dlm_print_one_lock_resource(res);
1941                                         BUG();
1942                                 }
1943                                 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1944                         }
1945                 }
1946 skip_lvb:
1947 
1948                 /* NOTE:
1949                  * wrt lock queue ordering and recovery:
1950                  *    1. order of locks on granted queue is
1951                  *       meaningless.
1952                  *    2. order of locks on converting queue is
1953                  *       LOST with the node death.  sorry charlie.
1954                  *    3. order of locks on the blocked queue is
1955                  *       also LOST.
1956                  * order of locks does not affect integrity, it
1957                  * just means that a lock request may get pushed
1958                  * back in line as a result of the node death.
1959                  * also note that for a given node the lock order
1960                  * for its secondary queue locks is preserved
1961                  * relative to each other, but clearly *not*
1962                  * preserved relative to locks from other nodes.
1963                  */
1964                 bad = 0;
1965                 spin_lock(&res->spinlock);
1966                 list_for_each_entry(lock, queue, list) {
1967                         if (lock->ml.cookie == ml->cookie) {
1968                                 c = lock->ml.cookie;
1969                                 mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
1970                                      "exists on this lockres!\n", dlm->name,
1971                                      res->lockname.len, res->lockname.name,
1972                                      dlm_get_lock_cookie_node(be64_to_cpu(c)),
1973                                      dlm_get_lock_cookie_seq(be64_to_cpu(c)));
1974 
1975                                 mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
1976                                      "node=%u, cookie=%u:%llu, queue=%d\n",
1977                                      ml->type, ml->convert_type, ml->node,
1978                                      dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
1979                                      dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
1980                                      ml->list);
1981 
1982                                 __dlm_print_one_lock_resource(res);
1983                                 bad = 1;
1984                                 break;
1985                         }
1986                 }
1987                 if (!bad) {
1988                         dlm_lock_get(newlock);
1989                         list_add_tail(&newlock->list, queue);
1990                         mlog(0, "%s:%.*s: added lock for node %u, "
1991                              "setting refmap bit\n", dlm->name,
1992                              res->lockname.len, res->lockname.name, ml->node);
1993                         dlm_lockres_set_refmap_bit(dlm, res, ml->node);
1994                         added++;
1995                 }
1996                 spin_unlock(&res->spinlock);
1997         }
1998         mlog(0, "done running all the locks\n");
1999 
2000 leave:
2001         /* balance the ref taken when the work was queued */
2002         spin_lock(&res->spinlock);
2003         dlm_lockres_drop_inflight_ref(dlm, res);
2004         spin_unlock(&res->spinlock);
2005 
2006         if (ret < 0) {
2007                 mlog_errno(ret);
2008                 if (newlock)
2009                         dlm_lock_put(newlock);
2010         }
2011 
2012         return ret;
2013 }
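
/*
 * Illustrative sketch (not part of the original file): the duplicate-
 * cookie guard at the end of dlm_process_recovery_data(), reduced to a
 * stand-alone scan.  A migrated lock may only be appended if no lock
 * with the same cookie already sits on the queue; a duplicate means the
 * recovery state is corrupt.  Simplified singly linked stand-in, not
 * the kernel list API.
 */
struct lk {
	unsigned long long cookie;
	struct lk *next;
};

static int cookie_exists(const struct lk *q, unsigned long long cookie)
{
	for (; q; q = q->next)
		if (q->cookie == cookie)
			return 1;
	return 0;
}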
2014 
2015 void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
2016                                        struct dlm_lock_resource *res)
2017 {
2018         int i;
2019         struct list_head *queue;
2020         struct dlm_lock *lock, *next;
2021 
2022         assert_spin_locked(&dlm->spinlock);
2023         assert_spin_locked(&res->spinlock);
2024         res->state |= DLM_LOCK_RES_RECOVERING;
2025         if (!list_empty(&res->recovering)) {
2026                 mlog(0,
2027                      "Recovering res %s:%.*s is already on the recovery list!\n",
2028                      dlm->name, res->lockname.len, res->lockname.name);
2029                 list_del_init(&res->recovering);
2030                 dlm_lockres_put(res);
2031         }
2032         /* We need to hold a reference while on the recovery list */
2033         dlm_lockres_get(res);
2034         list_add_tail(&res->recovering, &dlm->reco.resources);
2035 
2036         /* find any pending locks and put them back on the proper list */
2037         for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
2038                 queue = dlm_list_idx_to_ptr(res, i);
2039                 list_for_each_entry_safe(lock, next, queue, list) {
2040                         dlm_lock_get(lock);
2041                         if (lock->convert_pending) {
2042                                 /* move converting lock back to granted */
2043                                 mlog(0, "node died with convert pending "
2044                                      "on %.*s. move back to granted list.\n",
2045                                      res->lockname.len, res->lockname.name);
2046                                 dlm_revert_pending_convert(res, lock);
2047                                 lock->convert_pending = 0;
2048                         } else if (lock->lock_pending) {
2049                                 /* remove pending lock requests completely */
2050                                 BUG_ON(i != DLM_BLOCKED_LIST);
2051                                 mlog(0, "node died with lock pending "
2052                                      "on %.*s. remove from blocked list and skip.\n",
2053                                      res->lockname.len, res->lockname.name);
2054                                 /* lock will be floating until ref in
2055                                  * dlmlock_remote is freed after the network
2056                                  * call returns.  ok for it to not be on any
2057                                  * list since no ast can be called
2058                                  * (the master is dead). */
2059                                 dlm_revert_pending_lock(res, lock);
2060                                 lock->lock_pending = 0;
2061                         } else if (lock->unlock_pending) {
2062                                 /* if an unlock was in progress, treat as
2063                                  * if this had completed successfully
2064                                  * before sending this lock state to the
2065                                  * new master.  note that the dlm_unlock
2066                                  * call is still responsible for calling
2067                                  * the unlockast.  that will happen after
2068                                  * the network call times out.  for now,
2069                                  * just move lists to prepare the new
2070                                  * recovery master.  */
2071                                 BUG_ON(i != DLM_GRANTED_LIST);
2072                                 mlog(0, "node died with unlock pending "
2073                      "on %.*s. remove from granted list and skip.\n",
2074                                      res->lockname.len, res->lockname.name);
2075                                 dlm_commit_pending_unlock(res, lock);
2076                                 lock->unlock_pending = 0;
2077                         } else if (lock->cancel_pending) {
2078                                 /* if a cancel was in progress, treat as
2079                                  * if this had completed successfully
2080                                  * before sending this lock state to the
2081                                  * new master */
2082                                 BUG_ON(i != DLM_CONVERTING_LIST);
2083                                 mlog(0, "node died with cancel pending "
2084                                      "on %.*s. move back to granted list.\n",
2085                                      res->lockname.len, res->lockname.name);
2086                                 dlm_commit_pending_cancel(res, lock);
2087                                 lock->cancel_pending = 0;
2088                         }
2089                         dlm_lock_put(lock);
2090                 }
2091         }
2092 }
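
/*
 * Illustrative sketch (not part of the original file): the four
 * pending-operation rules above, side by side.  A pending convert or
 * cancel is settled back onto the granted queue, a pending lock request
 * is dropped entirely, and a pending unlock is treated as if it had
 * already completed.  The action callbacks are hypothetical stand-ins
 * for the revert/commit helpers called above.
 */
static void settle_pending(int convert_p, int lock_p, int unlock_p,
			   int cancel_p,
			   void (*revert_convert)(void),
			   void (*drop_lock)(void),
			   void (*commit_unlock)(void),
			   void (*commit_cancel)(void))
{
	if (convert_p)
		revert_convert();	/* back to granted at the old level */
	else if (lock_p)
		drop_lock();		/* request never landed; forget it */
	else if (unlock_p)
		commit_unlock();	/* pretend the unlock finished */
	else if (cancel_p)
		commit_cancel();	/* pretend the cancel finished */
}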
2093 
2094 
2095 
2096 /* removes all recovered locks from the recovery list.
2097  * sets the res->owner to the new master.
2098  * unsets the RECOVERY flag and wakes waiters. */
2099 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2100                                               u8 dead_node, u8 new_master)
2101 {
2102         int i;
2103         struct hlist_head *bucket;
2104         struct dlm_lock_resource *res, *next;
2105 
2106         assert_spin_locked(&dlm->spinlock);
2107 
2108         list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
2109                 if (res->owner == dead_node) {
2110                         mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2111                              dlm->name, res->lockname.len, res->lockname.name,
2112                              res->owner, new_master);
2113                         list_del_init(&res->recovering);
2114                         spin_lock(&res->spinlock);
2115                         /* new_master has our reference from
2116                          * the lock state sent during recovery */
2117                         dlm_change_lockres_owner(dlm, res, new_master);
2118                         res->state &= ~DLM_LOCK_RES_RECOVERING;
2119                         if (__dlm_lockres_has_locks(res))
2120                                 __dlm_dirty_lockres(dlm, res);
2121                         spin_unlock(&res->spinlock);
2122                         wake_up(&res->wq);
2123                         dlm_lockres_put(res);
2124                 }
2125         }
2126 
2127         /* this will become unnecessary eventually, but
2128          * for now we need to run the whole hash, clear
2129          * the RECOVERING state and set the owner
2130          * if necessary */
2131         for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2132                 bucket = dlm_lockres_hash(dlm, i);
2133                 hlist_for_each_entry(res, bucket, hash_node) {
2134                         if (!(res->state & DLM_LOCK_RES_RECOVERING))
2135                                 continue;
2136 
2137                         if (res->owner != dead_node &&
2138                             res->owner != dlm->node_num)
2139                                 continue;
2140 
2141                         if (!list_empty(&res->recovering)) {
2142                                 list_del_init(&res->recovering);
2143                                 dlm_lockres_put(res);
2144                         }
2145 
2146                         /* new_master has our reference from
2147                          * the lock state sent during recovery */
2148                         mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2149                              dlm->name, res->lockname.len, res->lockname.name,
2150                              res->owner, new_master);
2151                         spin_lock(&res->spinlock);
2152                         dlm_change_lockres_owner(dlm, res, new_master);
2153                         res->state &= ~DLM_LOCK_RES_RECOVERING;
2154                         if (__dlm_lockres_has_locks(res))
2155                                 __dlm_dirty_lockres(dlm, res);
2156                         spin_unlock(&res->spinlock);
2157                         wake_up(&res->wq);
2158                 }
2159         }
2160 }
2161 
2162 static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
2163 {
2164         if (local) {
2165                 if (lock->ml.type != LKM_EXMODE &&
2166                     lock->ml.type != LKM_PRMODE)
2167                         return 1;
2168         } else if (lock->ml.type == LKM_EXMODE)
2169                 return 1;
2170         return 0;
2171 }
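
/*
 * Illustrative note (not part of the original file): the invalidation
 * rule above as a truth table.  "local" means this node holds only a
 * secondary copy of the lockres; otherwise this node is the master and
 * the lock under test belongs to the dead node.
 *
 *   local?  lock mode        invalidate lvb?
 *    yes    EX or PR         no  (our granted copy is still good)
 *    yes    anything lower   yes (cached lvb can no longer be trusted)
 *    no     EX               yes (dead node may have written a newer lvb)
 *    no     PR or lower      no  (dead node could not have written it)
 */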
2172 
2173 static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
2174                                struct dlm_lock_resource *res, u8 dead_node)
2175 {
2176         struct list_head *queue;
2177         struct dlm_lock *lock;
2178         int blank_lvb = 0, local = 0;
2179         int i;
2180         u8 search_node;
2181 
2182         assert_spin_locked(&dlm->spinlock);
2183         assert_spin_locked(&res->spinlock);
2184 
2185         if (res->owner == dlm->node_num)
2186                 /* if this node owned the lockres, and if the dead node
2187                  * had an EX when he died, blank out the lvb */
2188                 search_node = dead_node;
2189         else {
2190                 /* if this is a secondary lockres, and we had no EX or PR
2191                  * locks granted, we can no longer trust the lvb */
2192                 search_node = dlm->node_num;
2193                 local = 1;  /* check local state for valid lvb */
2194         }
2195 
2196         for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
2197                 queue = dlm_list_idx_to_ptr(res, i);
2198                 list_for_each_entry(lock, queue, list) {
2199                         if (lock->ml.node == search_node) {
2200                                 if (dlm_lvb_needs_invalidation(lock, local)) {
2201                                         /* zero the lksb lvb and lockres lvb */
2202                                         blank_lvb = 1;
2203                                         memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
2204                                 }
2205                         }
2206                 }
2207         }
2208 
2209         if (blank_lvb) {
2210                 mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
2211                      res->lockname.len, res->lockname.name, dead_node);
2212                 memset(res->lvb, 0, DLM_LVB_LEN);
2213         }
2214 }
2215 
2216 static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
2217                                 struct dlm_lock_resource *res, u8 dead_node)
2218 {
2219         struct dlm_lock *lock, *next;
2220         unsigned int freed = 0;
2221 
2222         /* this node is the lockres master:
2223          * 1) remove any stale locks for the dead node
2224          * 2) if the dead node had an EX when he died, blank out the lvb
2225          */
2226         assert_spin_locked(&dlm->spinlock);
2227         assert_spin_locked(&res->spinlock);
2228 
2229         /* We do two dlm_lock_put()s: one for removing the lock from its list,
2230          * and the other to force the DLM_UNLOCK_FREE_LOCK action that frees it */
2231 
2232         /* TODO: check pending_asts, pending_basts here */
2233         list_for_each_entry_safe(lock, next, &res->granted, list) {
2234                 if (lock->ml.node == dead_node) {
2235                         list_del_init(&lock->list);
2236                         dlm_lock_put(lock);
2237                         /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2238                         dlm_lock_put(lock);
2239                         freed++;
2240                 }
2241         }
2242         list_for_each_entry_safe(lock, next, &res->converting, list) {
2243                 if (lock->ml.node == dead_node) {
2244                         list_del_init(&lock->list);
2245                         dlm_lock_put(lock);
2246                         /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2247                         dlm_lock_put(lock);
2248                         freed++;
2249                 }
2250         }
2251         list_for_each_entry_safe(lock, next, &res->blocked, list) {
2252                 if (lock->ml.node == dead_node) {
2253                         list_del_init(&lock->list);
2254                         dlm_lock_put(lock);
2255                         /* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
2256                         dlm_lock_put(lock);
2257                         freed++;
2258                 }
2259         }
2260 
2261         if (freed) {
2262                 mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
2263                      "dropping ref from lockres\n", dlm->name,
2264                      res->lockname.len, res->lockname.name, freed, dead_node);
2265                 if (!test_bit(dead_node, res->refmap)) {
2266                         mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
2267                              "but ref was not set\n", dlm->name,
2268                              res->lockname.len, res->lockname.name, freed, dead_node);
2269                         __dlm_print_one_lock_resource(res);
2270                 }
2271                 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2272         } else if (test_bit(dead_node, res->refmap)) {
2273                 mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2274                      "no locks and had not purged before dying\n", dlm->name,
2275                      res->lockname.len, res->lockname.name, dead_node);
2276                 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2277         }
2278 
2279         /* do not kick thread yet */
2280         __dlm_dirty_lockres(dlm, res);
2281 }
2282 
2283 /* if this node is the recovery master, and there are no
2284  * locks for a given lockres owned by this node that are in
2285  * either PR or EX mode, zero out the lvb before requesting.
2286  *
2287  */
2288 
2289 
2290 static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
2291 {
2292         struct dlm_lock_resource *res;
2293         int i;
2294         struct hlist_head *bucket;
2295         struct dlm_lock *lock;
2296 
2297 
2298         /* purge any stale mles */
2299         dlm_clean_master_list(dlm, dead_node);
2300 
2301         /*
2302          * now clean up all lock resources.  there are two rules:
2303          *
2304          * 1) if the dead node was the master, move the lockres
2305          *    to the recovering list.  set the RECOVERING flag.
2306          *    this lockres needs to be cleaned up before it can
2307          *    be used further.
2308          *
2309          * 2) if this node was the master, remove all locks from
2310          *    each of the lockres queues that were owned by the
2311          *    dead node.  once recovery finishes, the dlm thread
2312          *    can be kicked again to see if any ASTs or BASTs
2313          *    need to be fired as a result.
2314          */
2315         for (i = 0; i < DLM_HASH_BUCKETS; i++) {
2316                 bucket = dlm_lockres_hash(dlm, i);
2317                 hlist_for_each_entry(res, bucket, hash_node) {
2318                         /* always prune any $RECOVERY entries for dead nodes,
2319                          * otherwise hangs can occur during later recovery */
2320                         if (dlm_is_recovery_lock(res->lockname.name,
2321                                                  res->lockname.len)) {
2322                                 spin_lock(&res->spinlock);
2323                                 list_for_each_entry(lock, &res->granted, list) {
2324                                         if (lock->ml.node == dead_node) {
2325                                                 mlog(0, "AHA! there was "
2326                                                      "a $RECOVERY lock for dead "
2327                                                      "node %u (%s)!\n",
2328                                                      dead_node, dlm->name);
2329                                                 list_del_init(&lock->list);
2330                                                 dlm_lock_put(lock);
2331                                                 break;
2332                                         }
2333                                 }
2334                                 dlm_lockres_clear_refmap_bit(dlm, res,
2335                                                 dead_node);
2336                                 spin_unlock(&res->spinlock);
2337                                 continue;
2338                         }
2339                         spin_lock(&res->spinlock);
2340                         /* zero the lvb if necessary */
2341                         dlm_revalidate_lvb(dlm, res, dead_node);
2342                         if (res->owner == dead_node) {
2343                                 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
2344                                         mlog(ML_NOTICE, "%s: res %.*s, Skip "
2345                                              "recovery as it is being freed\n",
2346                                              dlm->name, res->lockname.len,
2347                                              res->lockname.name);
2348                                 } else
2349                                         dlm_move_lockres_to_recovery_list(dlm,
2350                                                                           res);
2351 
2352                         } else if (res->owner == dlm->node_num) {
2353                                 dlm_free_dead_locks(dlm, res, dead_node);
2354                                 __dlm_lockres_calc_usage(dlm, res);
2355                         } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2356                                 if (test_bit(dead_node, res->refmap)) {
2357                                         mlog(0, "%s:%.*s: dead node %u had a ref, but had "
2358                                                 "no locks and had not purged before dying\n",
2359                                                 dlm->name, res->lockname.len,
2360                                                 res->lockname.name, dead_node);
2361                                         dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2362                                 }
2363                         }
2364                         spin_unlock(&res->spinlock);
2365                 }
2366         }
2367 
2368 }
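
/*
 * Illustrative sketch (not part of the original file): the per-lockres
 * dispatch inside dlm_do_local_recovery_cleanup(), in outline.  The
 * cleanup action is chosen purely by who owned the resource when the
 * node died; the callbacks are hypothetical stand-ins for the three
 * paths above, and UNKNOWN_OWNER stands in for
 * DLM_LOCK_RES_OWNER_UNKNOWN.
 */
#define UNKNOWN_OWNER 255

static void cleanup_one(int owner, int self, int dead,
			void (*remaster)(void),  /* mark RECOVERING, remaster */
			void (*free_dead)(void), /* drop the dead node's locks */
			void (*clear_ref)(void)) /* clear a stale refmap bit */
{
	if (owner == dead)
		remaster();
	else if (owner == self)
		free_dead();
	else if (owner == UNKNOWN_OWNER)
		clear_ref();
}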
2369 
2370 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
2371 {
2372         assert_spin_locked(&dlm->spinlock);
2373 
2374         if (dlm->reco.new_master == idx) {
2375                 mlog(0, "%s: recovery master %d just died\n",
2376                      dlm->name, idx);
2377                 if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2378                         /* finalize1 was reached, so it is safe to clear
2379                          * the new_master and dead_node.  that recovery
2380                          * is complete. */
2381                         mlog(0, "%s: dead master %d had reached "
2382                              "finalize1 state, clearing\n", dlm->name, idx);
2383                         dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2384                         __dlm_reset_recovery(dlm);
2385                 }
2386         }
2387 
2388         /* Clean up join state on node death. */
2389         if (dlm->joining_node == idx) {
2390                 mlog(0, "Clearing join state for node %u\n", idx);
2391                 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
2392         }
2393 
2394         /* check to see if the node is already considered dead */
2395         if (!test_bit(idx, dlm->live_nodes_map)) {
2396                 mlog(0, "for domain %s, node %d is already dead. "
2397                      "another node likely did recovery already.\n",
2398                      dlm->name, idx);
2399                 return;
2400         }
2401 
2402         /* check to see if we do not care about this node */
2403         if (!test_bit(idx, dlm->domain_map)) {
2404                 /* This also catches the case that we get a node down
2405                  * but haven't joined the domain yet. */
2406                 mlog(0, "node %u already removed from domain!\n", idx);
2407                 return;
2408         }
2409 
2410         clear_bit(idx, dlm->live_nodes_map);
2411 
2412         /* make sure local cleanup occurs before the heartbeat events */
2413         if (!test_bit(idx, dlm->recovery_map))
2414                 dlm_do_local_recovery_cleanup(dlm, idx);
2415 
2416         /* notify anything attached to the heartbeat events */
2417         dlm_hb_event_notify_attached(dlm, idx, 0);
2418 
2419         mlog(0, "node %u being removed from domain map!\n", idx);
2420         clear_bit(idx, dlm->domain_map);
2421         clear_bit(idx, dlm->exit_domain_map);
2422         /* wake up migration waiters if a node goes down.
2423          * perhaps later we can genericize this for other waiters. */
2424         wake_up(&dlm->migration_wq);
2425 
2426         if (test_bit(idx, dlm->recovery_map))
2427                 mlog(0, "domain %s, node %u already added "
2428                      "to recovery map!\n", dlm->name, idx);
2429         else
2430                 set_bit(idx, dlm->recovery_map);
2431 }
2432 
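     /* o2hb callback fired when a node stops heartbeating.  fires
      * domain eviction callbacks first, outside the spinlock, then
      * does the real work in __dlm_hb_node_down. */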
2433 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
2434 {
2435         struct dlm_ctxt *dlm = data;
2436 
2437         if (!dlm_grab(dlm))
2438                 return;
2439 
2440         /*
2441          * This will notify any dlm users that a node in our domain
2442          * went away without notifying us first.
2443          */
2444         if (test_bit(idx, dlm->domain_map))
2445                 dlm_fire_domain_eviction_callbacks(dlm, idx);
2446 
2447         spin_lock(&dlm->spinlock);
2448         __dlm_hb_node_down(dlm, idx);
2449         spin_unlock(&dlm->spinlock);
2450 
2451         dlm_put(dlm);
2452 }
2453 
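     /* o2hb callback fired when a node starts heartbeating.  only
      * marks the node live; it becomes a domain member later, via
      * the join protocol in dlmdomain.c. */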
2454 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
2455 {
2456         struct dlm_ctxt *dlm = data;
2457 
2458         if (!dlm_grab(dlm))
2459                 return;
2460 
2461         spin_lock(&dlm->spinlock);
2462         set_bit(idx, dlm->live_nodes_map);
2463         /* do NOT notify mle attached to the heartbeat events.
2464          * new nodes are not interested in mastery until joined. */
2465         spin_unlock(&dlm->spinlock);
2466 
2467         dlm_put(dlm);
2468 }
2469 
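     /* ast/bast/unlock-ast callbacks for the special $RECOVERY lock.
      * they only log: the lock is used purely as a cluster-wide
      * mutex for electing a recovery master (see the comment below). */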
2470 static void dlm_reco_ast(void *astdata)
2471 {
2472         struct dlm_ctxt *dlm = astdata;
2473         mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
2474              dlm->node_num, dlm->name);
2475 }
2476 static void dlm_reco_bast(void *astdata, int blocked_type)
2477 {
2478         struct dlm_ctxt *dlm = astdata;
2479         mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
2480              dlm->node_num, dlm->name);
2481 }
2482 static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
2483 {
2484         mlog(0, "unlockast for recovery lock fired!\n");
2485 }
2486 
2487 /*
2488  * dlm_pick_recovery_master will continually attempt to use
2489  * dlmlock() on the special "$RECOVERY" lockres with the
2490  * LKM_NOQUEUE flag to get an EX.  every thread that enters
2491  * this function, on every node racing to become the recovery
2492  * master, keeps attempting this until either:
2493  * a) this node gets the EX (and becomes the recovery master),
2494  * or b) dlm->reco.new_master gets set to some nodenum
2495  * != O2NM_INVALID_NODE_NUM (another node will do the reco).
2496  * so each time a recovery master is needed, the entire cluster
2497  * will sync at this point.  if the new master dies, that will
2498  * be detected in dlm_do_recovery */
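     /* outcomes of the dlmlock($RECOVERY) attempt below:
      *   DLM_NORMAL     - got the EX; unless another node raced in
      *                    first (or the dead node was already
      *                    recovered), this node masters the recovery
      *   DLM_NOTQUEUED  - another node holds the EX; wait, one second
      *                    at a time, for reco.new_master to be set
      *   DLM_RECOVERING - the $RECOVERY master itself died; retry
      *   anything else  - unexpected, so dump the lockres and BUG() */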
2499 static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
2500 {
2501         enum dlm_status ret;
2502         struct dlm_lockstatus lksb;
2503         int status = -EINVAL;
2504 
2505         mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
2506              dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
2507 again:
2508         memset(&lksb, 0, sizeof(lksb));
2509 
2510         ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
2511                       DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
2512                       dlm_reco_ast, dlm, dlm_reco_bast);
2513 
2514         mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
2515              dlm->name, ret, lksb.status);
2516 
2517         if (ret == DLM_NORMAL) {
2518                 mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
2519                      dlm->name, dlm->node_num);
2520 
2521                 /* got the EX lock.  check to see if another node
2522                  * just became the reco master */
2523                 if (dlm_reco_master_ready(dlm)) {
2524                         mlog(0, "%s: got reco EX lock, but %u will "
2525                              "do the recovery\n", dlm->name,
2526                              dlm->reco.new_master);
2527                         status = -EEXIST;
2528                 } else {
2529                         status = 0;
2530 
2531                         /* see if recovery was already finished elsewhere */
2532                         spin_lock(&dlm->spinlock);
2533                         if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
2534                                 status = -EINVAL;
2535                                 mlog(0, "%s: got reco EX lock, but "
2536                                      "node got recovered already\n", dlm->name);
2537                                 if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2538                                         mlog(ML_ERROR, "%s: new master is %u "
2539                                              "but no dead node!\n",
2540                                              dlm->name, dlm->reco.new_master);
2541                                         BUG();
2542                                 }
2543                         }
2544                         spin_unlock(&dlm->spinlock);
2545                 }
2546 
2547                 /* if this node has actually become the recovery master,
2548                  * set the master and send the messages to begin recovery */
2549                 if (!status) {
2550                         mlog(0, "%s: dead=%u, this=%u, sending "
2551                              "begin_reco now\n", dlm->name,
2552                              dlm->reco.dead_node, dlm->node_num);
2553                         status = dlm_send_begin_reco_message(dlm,
2554                                       dlm->reco.dead_node);
2555                         /* this always succeeds */
2556                         BUG_ON(status);
2557 
2558                         /* set the new_master to this node */
2559                         spin_lock(&dlm->spinlock);
2560                         dlm_set_reco_master(dlm, dlm->node_num);
2561                         spin_unlock(&dlm->spinlock);
2562                 }
2563 
2564                 /* recovery lock is a special case.  ast will not get fired,
2565                  * so just go ahead and unlock it. */
2566                 ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
2567                 if (ret == DLM_DENIED) {
2568                         mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
2569                         ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
2570                 }
2571                 if (ret != DLM_NORMAL) {
2572                         /* this would really suck. this could only happen
2573                          * if there was a network error during the unlock
2574                          * because of node death.  this means the unlock
2575                          * is actually "done" and the lock structure is
2576                          * even freed.  we can continue, but only
2577                          * because this specific lock name is special. */
2578                         mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
2579                 }
2580         } else if (ret == DLM_NOTQUEUED) {
2581                 mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
2582                      dlm->name, dlm->node_num);
2583                 /* another node is master. wait on
2584                  * reco.new_master != O2NM_INVALID_NODE_NUM
2585                  * for at most one second */
2586                 wait_event_timeout(dlm->dlm_reco_thread_wq,
2587                                          dlm_reco_master_ready(dlm),
2588                                          msecs_to_jiffies(1000));
2589                 if (!dlm_reco_master_ready(dlm)) {
2590                         mlog(0, "%s: reco master taking a while\n",
2591                              dlm->name);
2592                         goto again;
2593                 }
2594                 /* another node has informed this one that it is reco master */
2595                 mlog(0, "%s: reco master %u is ready to recover %u\n",
2596                      dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
2597                 status = -EEXIST;
2598         } else if (ret == DLM_RECOVERING) {
2599                 mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
2600                      dlm->name, dlm->node_num);
2601                 goto again;
2602         } else {
2603                 struct dlm_lock_resource *res;
2604 
2605                 /* dlmlock returned something other than NOTQUEUED or NORMAL */
2606                 mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
2607                      "lksb.status=%s\n", dlm->name, dlm_errname(ret),
2608                      dlm_errname(lksb.status));
2609                 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2610                                          DLM_RECOVERY_LOCK_NAME_LEN);
2611                 if (res) {
2612                         dlm_print_one_lock_resource(res);
2613                         dlm_lockres_put(res);
2614                 } else {
2615                         mlog(ML_ERROR, "recovery lock not found\n");
2616                 }
2617                 BUG();
2618         }
2619 
2620         return status;
2621 }
2622 
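     /* broadcast DLM_BEGIN_RECO_MSG to every live node in the domain,
      * skipping self and the dead node.  retries indefinitely on
      * -EAGAIN and on transport errors, so a nonzero return should be
      * impossible; the caller BUG()s on one. */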
2623 static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
2624 {
2625         struct dlm_begin_reco br;
2626         int ret = 0;
2627         struct dlm_node_iter iter;
2628         int nodenum;
2629         int status;
2630 
2631         mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);
2632 
2633         spin_lock(&dlm->spinlock);
2634         dlm_node_iter_init(dlm->domain_map, &iter);
2635         spin_unlock(&dlm->spinlock);
2636 
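             /* drop the dead node from the iteration snapshot; the
              * loop below also skips it defensively */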
2637         clear_bit(dead_node, iter.node_map);
2638 
2639         memset(&br, 0, sizeof(br));
2640         br.node_idx = dlm->node_num;
2641         br.dead_node = dead_node;
2642 
2643         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2644                 ret = 0;
2645                 if (nodenum == dead_node) {
2646                         mlog(0, "not sending begin reco to dead node "
2647                                   "%u\n", dead_node);
2648                         continue;
2649                 }
2650                 if (nodenum == dlm->node_num) {
2651                         mlog(0, "not sending begin reco to self\n");
2652                         continue;
2653                 }
2654 retry:
2655                 ret = -EINVAL;
2656                 mlog(0, "attempting to send begin reco msg to %d\n",
2657                           nodenum);
2658                 ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
2659                                          &br, sizeof(br), nodenum, &status);
2660                 /* a negative status from the remote handler is
2661                  * handled just below, so it is fine here */
2661                 if (ret >= 0)
2662                         ret = status;
2663                 if (dlm_is_host_down(ret)) {
2664                         /* node is down.  not involved in recovery
2665                          * so just keep going */
2666                         mlog(ML_NOTICE, "%s: node %u was down when sending "
2667                              "begin reco msg (%d)\n", dlm->name, nodenum, ret);
2668                         ret = 0;
2669                 }
2670 
2671                 /*
2672                  * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
2673                  * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
2674                  * We are handling both for compatibility reasons.
2675                  */
2676                 if (ret == -EAGAIN || ret == EAGAIN) {
2677                         mlog(0, "%s: trying to start recovery of node "
2678                              "%u, but node %u is waiting for last recovery "
2679                              "to complete, backoff for a bit\n", dlm->name,
2680                              dead_node, nodenum);
2681                         msleep(100);
2682                         goto retry;
2683                 }
2684                 if (ret < 0) {
2685                         struct dlm_lock_resource *res;
2686 
2687                         /* this is now a serious problem, possibly ENOMEM
2688                          * in the network stack.  must retry */
2689                         mlog_errno(ret);
2690                         mlog(ML_ERROR, "begin reco of dlm %s to node %u "
2691                              "returned %d\n", dlm->name, nodenum, ret);
2692                         res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2693                                                  DLM_RECOVERY_LOCK_NAME_LEN);
2694                         if (res) {
2695                                 dlm_print_one_lock_resource(res);
2696                                 dlm_lockres_put(res);
2697                         } else {
2698                                 mlog(ML_ERROR, "recovery lock not found\n");
2699                         }
2700                         /* sleep for a bit in hopes that we can avoid
2701                          * another ENOMEM */
2702                         msleep(100);
2703                         goto retry;
2704                 }
2705         }
2706 
2707         return ret;
2708 }
2709 
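     /* handler for DLM_BEGIN_RECO_MSG.  returns -EAGAIN while a
      * previous recovery sits between finalize1 and finalize2;
      * otherwise records the new recovery master and dead node,
      * forces local node-down processing if needed, and kicks the
      * recovery thread. */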
2710 int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2711                            void **ret_data)
2712 {
2713         struct dlm_ctxt *dlm = data;
2714         struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;
2715 
2716         /* ok to return 0, domain has gone away */
2717         if (!dlm_grab(dlm))
2718                 return 0;
2719 
2720         spin_lock(&dlm->spinlock);
2721         if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2722                 mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
2723                      "but this node is in finalize state, waiting on finalize2\n",
2724                      dlm->name, br->node_idx, br->dead_node,
2725                      dlm->reco.dead_node, dlm->reco.new_master);
2726                 spin_unlock(&dlm->spinlock);
2727                 dlm_put(dlm);
2728                 return -EAGAIN;
2729         }
2730         spin_unlock(&dlm->spinlock);
2731 
2732         mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
2733              dlm->name, br->node_idx, br->dead_node,
2734              dlm->reco.dead_node, dlm->reco.new_master);
2735 
2736         dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);
2737 
2738         spin_lock(&dlm->spinlock);
2739         if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
2740                 if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
2741                         mlog(0, "%s: new_master %u died, changing "
2742                              "to %u\n", dlm->name, dlm->reco.new_master,
2743                              br->node_idx);
2744                 } else {
2745                         mlog(0, "%s: new_master %u NOT DEAD, changing "
2746                              "to %u\n", dlm->name, dlm->reco.new_master,
2747                              br->node_idx);
2748                         /* may not have seen the new master as dead yet */
2749                 }
2750         }
2751         if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
2752                 mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
2753                      "node %u changing it to %u\n", dlm->name,
2754                      dlm->reco.dead_node, br->node_idx, br->dead_node);
2755         }
2756         dlm_set_reco_master(dlm, br->node_idx);
2757         dlm_set_reco_dead_node(dlm, br->dead_node);
2758         if (!test_bit(br->dead_node, dlm->recovery_map)) {
2759                 mlog(0, "recovery master %u sees %u as dead, but this "
2760                      "node has not yet.  marking %u as dead\n",
2761                      br->node_idx, br->dead_node, br->dead_node);
2762                 if (!test_bit(br->dead_node, dlm->domain_map) ||
2763                     !test_bit(br->dead_node, dlm->live_nodes_map))
2764                         mlog(0, "%u not in domain/live_nodes map "
2765                              "so setting it in reco map manually\n",
2766                              br->dead_node);
2767                 /* force the recovery cleanup in __dlm_hb_node_down;
2768                  * both of these bits will be cleared in a moment */
2769                 set_bit(br->dead_node, dlm->domain_map);
2770                 set_bit(br->dead_node, dlm->live_nodes_map);
2771                 __dlm_hb_node_down(dlm, br->dead_node);
2772         }
2773         spin_unlock(&dlm->spinlock);
2774 
2775         dlm_kick_recovery_thread(dlm);
2776 
2777         mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
2778              dlm->name, br->node_idx, br->dead_node,
2779              dlm->reco.dead_node, dlm->reco.new_master);
2780 
2781         dlm_put(dlm);
2782         return 0;
2783 }
2784 
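     /* recovery is finalized with two broadcast rounds: stage 1
      * tells every node that the dead node's locks have been
      * remastered, stage 2 (DLM_FINALIZE_STAGE2) lets them clear
      * DLM_RECO_STATE_FINALIZE and reset their recovery state.
      * the split keeps any one node from resetting before all
      * nodes have processed stage 1. */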
2785 #define DLM_FINALIZE_STAGE2  0x01
2786 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
2787 {
2788         int ret = 0;
2789         struct dlm_finalize_reco fr;
2790         struct dlm_node_iter iter;
2791         int nodenum;
2792         int status;
2793         int stage = 1;
2794 
2795         mlog(0, "finishing recovery for node %s:%u, "
2796              "stage %d\n", dlm->name, dlm->reco.dead_node, stage);
2797 
2798         spin_lock(&dlm->spinlock);
2799         dlm_node_iter_init(dlm->domain_map, &iter);
2800         spin_unlock(&dlm->spinlock);
2801 
2802 stage2:
2803         memset(&fr, 0, sizeof(fr));
2804         fr.node_idx = dlm->node_num;
2805         fr.dead_node = dlm->reco.dead_node;
2806         if (stage == 2)
2807                 fr.flags |= DLM_FINALIZE_STAGE2;
2808 
2809         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2810                 if (nodenum == dlm->node_num)
2811                         continue;
2812                 ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
2813                                          &fr, sizeof(fr), nodenum, &status);
2814                 if (ret >= 0)
2815                         ret = status;
2816                 if (ret < 0) {
2817                         mlog(ML_ERROR, "Error %d when sending message %u (key "
2818                              "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG,
2819                              dlm->key, nodenum);
2820                         if (dlm_is_host_down(ret)) {
2821                                 /* this has no effect on this recovery
2822                                  * session, so set the status to zero to
2823                                  * finish out the last recovery */
2824                                 mlog(ML_ERROR, "node %u went down after this "
2825                                      "node finished recovery.\n", nodenum);
2826                                 ret = 0;
2827                                 continue;
2828                         }
2829                         break;
2830                 }
2831         }
2832         if (stage == 1) {
2833                 /* reset the node_iter back to the top and send finalize2 */
2834                 iter.curnode = -1;
2835                 stage = 2;
2836                 goto stage2;
2837         }
2838 
2839         return ret;
2840 }
2841 
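     /* handler for DLM_FINALIZE_RECO_MSG.  sanity-checks that the
      * sender and the dead node match this node's view of the
      * recovery, then applies stage 1 (reassign recovered lockres,
      * set FINALIZE) or stage 2 (clear FINALIZE, reset recovery
      * state, kick the recovery thread). */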
2842 int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
2843                               void **ret_data)
2844 {
2845         struct dlm_ctxt *dlm = data;
2846         struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
2847         int stage = 1;
2848 
2849         /* ok to return 0, domain has gone away */
2850         if (!dlm_grab(dlm))
2851                 return 0;
2852 
2853         if (fr->flags & DLM_FINALIZE_STAGE2)
2854                 stage = 2;
2855 
2856         mlog(0, "%s: node %u finalizing recovery stage %d of "
2857              "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
2858              fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
2859 
2860         spin_lock(&dlm->spinlock);
2861 
2862         if (dlm->reco.new_master != fr->node_idx) {
2863                 mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
2864                      "%u is supposed to be the new master, dead=%u\n",
2865                      fr->node_idx, dlm->reco.new_master, fr->dead_node);
2866                 BUG();
2867         }
2868         if (dlm->reco.dead_node != fr->dead_node) {
2869                 mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
2870                      "node %u, but node %u is supposed to be dead\n",
2871                      fr->node_idx, fr->dead_node, dlm->reco.dead_node);
2872                 BUG();
2873         }
2874 
2875         switch (stage) {
2876                 case 1:
2877                         dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
2878                         if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
2879                                 mlog(ML_ERROR, "%s: received finalize1 from "
2880                                      "new master %u for dead node %u, but "
2881                                      "this node has already received it!\n",
2882                                      dlm->name, fr->node_idx, fr->dead_node);
2883                                 dlm_print_reco_node_status(dlm);
2884                                 BUG();
2885                         }
2886                         dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
2887                         spin_unlock(&dlm->spinlock);
2888                         break;
2889                 case 2:
2890                         if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
2891                                 mlog(ML_ERROR, "%s: received finalize2 from "
2892                                      "new master %u for dead node %u, but "
2893                                      "this node did not have finalize1!\n",
2894                                      dlm->name, fr->node_idx, fr->dead_node);
2895                                 dlm_print_reco_node_status(dlm);
2896                                 BUG();
2897                         }
2898                         dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
2899                         __dlm_reset_recovery(dlm);
2900                         spin_unlock(&dlm->spinlock);
2901                         dlm_kick_recovery_thread(dlm);
2902                         break;
2903                 default:
2904                         BUG();
2905         }
2906 
2907         mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
2908              dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);
2909 
2910         dlm_put(dlm);
2911         return 0;
2912 }
2913 
