/* Linux-libre 4.10.3-gnu -- librecmc/linux-libre.git: fs/ceph/mds_client.c */
1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/fs.h>
4 #include <linux/wait.h>
5 #include <linux/slab.h>
6 #include <linux/gfp.h>
7 #include <linux/sched.h>
8 #include <linux/debugfs.h>
9 #include <linux/seq_file.h>
10 #include <linux/utsname.h>
11 #include <linux/ratelimit.h>
12
13 #include "super.h"
14 #include "mds_client.h"
15
16 #include <linux/ceph/ceph_features.h>
17 #include <linux/ceph/messenger.h>
18 #include <linux/ceph/decode.h>
19 #include <linux/ceph/pagelist.h>
20 #include <linux/ceph/auth.h>
21 #include <linux/ceph/debugfs.h>
22
23 /*
24  * A cluster of MDS (metadata server) daemons is responsible for
25  * managing the file system namespace (the directory hierarchy and
26  * inodes) and for coordinating shared access to storage.  Metadata is
27  * partitioned hierarchically across a number of servers, and that
28  * partition varies over time as the cluster adjusts the distribution
29  * in order to balance load.
30  *
31  * The MDS client is primarily responsible for managing synchronous
32  * metadata requests for operations like open, unlink, and so forth.
33  * If there is an MDS failure, we find out about it when we (possibly
34  * request and) receive a new MDS map, and can resubmit affected
35  * requests.
36  *
37  * For the most part, though, we take advantage of a lossless
38  * communications channel to the MDS, and do not need to worry about
39  * timing out or resubmitting requests.
40  *
41  * We maintain a stateful "session" with each MDS we interact with.
42  * Within each session, we send periodic heartbeat messages to ensure
43  * any capabilities or leases we have been issued remain valid.  If
44  * the session times out and goes stale, our leases and capabilities
45  * are no longer valid.
46  */
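/*
 * Illustrative aside (not part of the original file): the staleness
 * test described above is expressed with the jiffies time helpers.  A
 * minimal sketch, assuming s_cap_ttl was set by renewed_caps() below:
 *
 *	if (time_after_eq(jiffies, session->s_cap_ttl))
 *		pr_info("mds%d caps stale\n", session->s_mds);
 *
 * send_renew_caps() below uses exactly this form of check.
 */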
47
48 struct ceph_reconnect_state {
49         int nr_caps;
50         struct ceph_pagelist *pagelist;
51         unsigned msg_version;
52 };
53
54 static void __wake_requests(struct ceph_mds_client *mdsc,
55                             struct list_head *head);
56
57 static const struct ceph_connection_operations mds_con_ops;
58
59
60 /*
61  * mds reply parsing
62  */
63
64 /*
65  * parse individual inode info
66  */
67 static int parse_reply_info_in(void **p, void *end,
68                                struct ceph_mds_reply_info_in *info,
69                                u64 features)
70 {
71         int err = -EIO;
72
73         info->in = *p;
74         *p += sizeof(struct ceph_mds_reply_inode) +
75                 sizeof(*info->in->fragtree.splits) *
76                 le32_to_cpu(info->in->fragtree.nsplits);
77
78         ceph_decode_32_safe(p, end, info->symlink_len, bad);
79         ceph_decode_need(p, end, info->symlink_len, bad);
80         info->symlink = *p;
81         *p += info->symlink_len;
82
83         if (features & CEPH_FEATURE_DIRLAYOUTHASH)
84                 ceph_decode_copy_safe(p, end, &info->dir_layout,
85                                       sizeof(info->dir_layout), bad);
86         else
87                 memset(&info->dir_layout, 0, sizeof(info->dir_layout));
88
89         ceph_decode_32_safe(p, end, info->xattr_len, bad);
90         ceph_decode_need(p, end, info->xattr_len, bad);
91         info->xattr_data = *p;
92         *p += info->xattr_len;
93
94         if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
95                 ceph_decode_64_safe(p, end, info->inline_version, bad);
96                 ceph_decode_32_safe(p, end, info->inline_len, bad);
97                 ceph_decode_need(p, end, info->inline_len, bad);
98                 info->inline_data = *p;
99                 *p += info->inline_len;
100         } else
101                 info->inline_version = CEPH_INLINE_NONE;
102
103         info->pool_ns_len = 0;
104         info->pool_ns_data = NULL;
105         if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
106                 ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
107                 if (info->pool_ns_len > 0) {
108                         ceph_decode_need(p, end, info->pool_ns_len, bad);
109                         info->pool_ns_data = *p;
110                         *p += info->pool_ns_len;
111                 }
112         }
113
114         return 0;
115 bad:
116         return err;
117 }
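/*
 * Hedged sketch of the bounds-checked decode pattern used above (the
 * ceph_decode_* helpers come from <linux/ceph/decode.h>): each *_safe
 * macro checks that enough bytes remain between *p and end before
 * reading, and jumps to the supplied label on a short buffer.
 *
 *	u32 len;
 *	void *data;
 *
 *	ceph_decode_32_safe(p, end, len, bad);	// read a le32 length
 *	ceph_decode_need(p, end, len, bad);	// ensure len bytes remain
 *	data = *p;				// borrow in place, no copy
 *	*p += len;				// advance the cursor
 */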
118
119 /*
120  * parse a normal reply, which may contain a (dir+)dentry and/or a
121  * target inode.
122  */
123 static int parse_reply_info_trace(void **p, void *end,
124                                   struct ceph_mds_reply_info_parsed *info,
125                                   u64 features)
126 {
127         int err;
128
129         if (info->head->is_dentry) {
130                 err = parse_reply_info_in(p, end, &info->diri, features);
131                 if (err < 0)
132                         goto out_bad;
133
134                 if (unlikely(*p + sizeof(*info->dirfrag) > end))
135                         goto bad;
136                 info->dirfrag = *p;
137                 *p += sizeof(*info->dirfrag) +
138                         sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
139                 if (unlikely(*p > end))
140                         goto bad;
141
142                 ceph_decode_32_safe(p, end, info->dname_len, bad);
143                 ceph_decode_need(p, end, info->dname_len, bad);
144                 info->dname = *p;
145                 *p += info->dname_len;
146                 info->dlease = *p;
147                 *p += sizeof(*info->dlease);
148         }
149
150         if (info->head->is_target) {
151                 err = parse_reply_info_in(p, end, &info->targeti, features);
152                 if (err < 0)
153                         goto out_bad;
154         }
155
156         if (unlikely(*p != end))
157                 goto bad;
158         return 0;
159
160 bad:
161         err = -EIO;
162 out_bad:
163         pr_err("problem parsing mds trace %d\n", err);
164         return err;
165 }
166
167 /*
168  * parse readdir results
169  */
170 static int parse_reply_info_dir(void **p, void *end,
171                                 struct ceph_mds_reply_info_parsed *info,
172                                 u64 features)
173 {
174         u32 num, i = 0;
175         int err;
176
177         info->dir_dir = *p;
178         if (*p + sizeof(*info->dir_dir) > end)
179                 goto bad;
180         *p += sizeof(*info->dir_dir) +
181                 sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
182         if (*p > end)
183                 goto bad;
184
185         ceph_decode_need(p, end, sizeof(num) + 2, bad);
186         num = ceph_decode_32(p);
187         {
188                 u16 flags = ceph_decode_16(p);
189                 info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
190                 info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
191                 info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
192         }
193         if (num == 0)
194                 goto done;
195
196         BUG_ON(!info->dir_entries);
197         if ((unsigned long)(info->dir_entries + num) >
198             (unsigned long)info->dir_entries + info->dir_buf_size) {
199                 pr_err("dir contents are larger than expected\n");
200                 WARN_ON(1);
201                 goto bad;
202         }
203
204         info->dir_nr = num;
205         while (num) {
206                 struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
207                 /* dentry */
208                 ceph_decode_need(p, end, sizeof(u32)*2, bad);
209                 rde->name_len = ceph_decode_32(p);
210                 ceph_decode_need(p, end, rde->name_len, bad);
211                 rde->name = *p;
212                 *p += rde->name_len;
213                 dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
214                 rde->lease = *p;
215                 *p += sizeof(struct ceph_mds_reply_lease);
216
217                 /* inode */
218                 err = parse_reply_info_in(p, end, &rde->inode, features);
219                 if (err < 0)
220                         goto out_bad;
221                 /* ceph_readdir_prepopulate() will update it */
222                 rde->offset = 0;
223                 i++;
224                 num--;
225         }
226
227 done:
228         if (*p != end)
229                 goto bad;
230         return 0;
231
232 bad:
233         err = -EIO;
234 out_bad:
235         pr_err("problem parsing dir contents %d\n", err);
236         return err;
237 }
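/*
 * Note on the bounds check above (illustrative): dir_entries is a
 * preallocated array of struct ceph_mds_reply_dir_entry backed by
 * dir_buf_size bytes, so the check is equivalent to rejecting replies
 * where
 *
 *	num * sizeof(struct ceph_mds_reply_dir_entry) > info->dir_buf_size
 */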
238
239 /*
240  * parse fcntl F_GETLK results
241  */
242 static int parse_reply_info_filelock(void **p, void *end,
243                                      struct ceph_mds_reply_info_parsed *info,
244                                      u64 features)
245 {
246         if (*p + sizeof(*info->filelock_reply) > end)
247                 goto bad;
248
249         info->filelock_reply = *p;
250         *p += sizeof(*info->filelock_reply);
251
252         if (unlikely(*p != end))
253                 goto bad;
254         return 0;
255
256 bad:
257         return -EIO;
258 }
259
260 /*
261  * parse create results
262  */
263 static int parse_reply_info_create(void **p, void *end,
264                                   struct ceph_mds_reply_info_parsed *info,
265                                   u64 features)
266 {
267         if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
268                 if (*p == end) {
269                         info->has_create_ino = false;
270                 } else {
271                         info->has_create_ino = true;
272                         info->ino = ceph_decode_64(p);
273                 }
274         }
275
276         if (unlikely(*p != end))
277                 goto bad;
278         return 0;
279
280 bad:
281         return -EIO;
282 }
283
284 /*
285  * parse extra results
286  */
287 static int parse_reply_info_extra(void **p, void *end,
288                                   struct ceph_mds_reply_info_parsed *info,
289                                   u64 features)
290 {
291         u32 op = le32_to_cpu(info->head->op);
292
293         if (op == CEPH_MDS_OP_GETFILELOCK)
294                 return parse_reply_info_filelock(p, end, info, features);
295         else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
296                 return parse_reply_info_dir(p, end, info, features);
297         else if (op == CEPH_MDS_OP_CREATE)
298                 return parse_reply_info_create(p, end, info, features);
299         else
300                 return -EIO;
301 }
302
303 /*
304  * parse entire mds reply
305  */
306 static int parse_reply_info(struct ceph_msg *msg,
307                             struct ceph_mds_reply_info_parsed *info,
308                             u64 features)
309 {
310         void *p, *end;
311         u32 len;
312         int err;
313
314         info->head = msg->front.iov_base;
315         p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
316         end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
317
318         /* trace */
319         ceph_decode_32_safe(&p, end, len, bad);
320         if (len > 0) {
321                 ceph_decode_need(&p, end, len, bad);
322                 err = parse_reply_info_trace(&p, p+len, info, features);
323                 if (err < 0)
324                         goto out_bad;
325         }
326
327         /* extra */
328         ceph_decode_32_safe(&p, end, len, bad);
329         if (len > 0) {
330                 ceph_decode_need(&p, end, len, bad);
331                 err = parse_reply_info_extra(&p, p+len, info, features);
332                 if (err < 0)
333                         goto out_bad;
334         }
335
336         /* snap blob */
337         ceph_decode_32_safe(&p, end, len, bad);
338         info->snapblob_len = len;
339         info->snapblob = p;
340         p += len;
341
342         if (p != end)
343                 goto bad;
344         return 0;
345
346 bad:
347         err = -EIO;
348 out_bad:
349         pr_err("mds parse_reply err %d\n", err);
350         return err;
351 }
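/*
 * For reference, a sketch of the overall reply framing decoded above:
 * three length-prefixed blobs follow the fixed reply head, in order.
 *
 *	struct ceph_mds_reply_head head;
 *	u32 trace_len;    u8 trace[trace_len];       // parse_reply_info_trace()
 *	u32 extra_len;    u8 extra[extra_len];       // parse_reply_info_extra()
 *	u32 snapblob_len; u8 snapblob[snapblob_len]; // kept as a raw blob
 */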
352
353 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
354 {
355         if (!info->dir_entries)
356                 return;
357         free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
358 }
359
360
361 /*
362  * sessions
363  */
364 const char *ceph_session_state_name(int s)
365 {
366         switch (s) {
367         case CEPH_MDS_SESSION_NEW: return "new";
368         case CEPH_MDS_SESSION_OPENING: return "opening";
369         case CEPH_MDS_SESSION_OPEN: return "open";
370         case CEPH_MDS_SESSION_HUNG: return "hung";
371         case CEPH_MDS_SESSION_CLOSING: return "closing";
372         case CEPH_MDS_SESSION_RESTARTING: return "restarting";
373         case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
374         case CEPH_MDS_SESSION_REJECTED: return "rejected";
375         default: return "???";
376         }
377 }
378
379 static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
380 {
381         if (atomic_inc_not_zero(&s->s_ref)) {
382                 dout("mdsc get_session %p %d -> %d\n", s,
383                      atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
384                 return s;
385         } else {
386                 dout("mdsc get_session %p 0 -- FAIL\n", s);
387                 return NULL;
388         }
389 }
390
391 void ceph_put_mds_session(struct ceph_mds_session *s)
392 {
393         dout("mdsc put_session %p %d -> %d\n", s,
394              atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
395         if (atomic_dec_and_test(&s->s_ref)) {
396                 if (s->s_auth.authorizer)
397                         ceph_auth_destroy_authorizer(s->s_auth.authorizer);
398                 kfree(s);
399         }
400 }
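/*
 * Typical (illustrative) use of the refcounting pair above: bump the
 * count while a session pointer is used outside mdsc->mutex, then drop
 * it.  get_session() fails only if the count already hit zero.
 *
 *	struct ceph_mds_session *s = get_session(session);
 *	if (s) {
 *		// ... use s without holding mdsc->mutex ...
 *		ceph_put_mds_session(s);
 *	}
 */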
401
402 /*
403  * called under mdsc->mutex
404  */
405 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
406                                                    int mds)
407 {
408         struct ceph_mds_session *session;
409
410         if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
411                 return NULL;
412         session = mdsc->sessions[mds];
413         dout("lookup_mds_session %p %d\n", session,
414              atomic_read(&session->s_ref));
415         get_session(session);
416         return session;
417 }
418
419 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
420 {
421         if (mds >= mdsc->max_sessions)
422                 return false;
423         return mdsc->sessions[mds];
424 }
425
426 static int __verify_registered_session(struct ceph_mds_client *mdsc,
427                                        struct ceph_mds_session *s)
428 {
429         if (s->s_mds >= mdsc->max_sessions ||
430             mdsc->sessions[s->s_mds] != s)
431                 return -ENOENT;
432         return 0;
433 }
434
435 /*
436  * create+register a new session for given mds.
437  * called under mdsc->mutex.
438  */
439 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
440                                                  int mds)
441 {
442         struct ceph_mds_session *s;
443
444         if (mds >= mdsc->mdsmap->m_max_mds)
445                 return ERR_PTR(-EINVAL);
446
447         s = kzalloc(sizeof(*s), GFP_NOFS);
448         if (!s)
449                 return ERR_PTR(-ENOMEM);
450         s->s_mdsc = mdsc;
451         s->s_mds = mds;
452         s->s_state = CEPH_MDS_SESSION_NEW;
453         s->s_ttl = 0;
454         s->s_seq = 0;
455         mutex_init(&s->s_mutex);
456
457         ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
458
459         spin_lock_init(&s->s_gen_ttl_lock);
460         s->s_cap_gen = 0;
461         s->s_cap_ttl = jiffies - 1;
462
463         spin_lock_init(&s->s_cap_lock);
464         s->s_renew_requested = 0;
465         s->s_renew_seq = 0;
466         INIT_LIST_HEAD(&s->s_caps);
467         s->s_nr_caps = 0;
468         s->s_trim_caps = 0;
469         atomic_set(&s->s_ref, 1);
470         INIT_LIST_HEAD(&s->s_waiting);
471         INIT_LIST_HEAD(&s->s_unsafe);
472         s->s_num_cap_releases = 0;
473         s->s_cap_reconnect = 0;
474         s->s_cap_iterator = NULL;
475         INIT_LIST_HEAD(&s->s_cap_releases);
476         INIT_LIST_HEAD(&s->s_cap_flushing);
477
478         dout("register_session mds%d\n", mds);
479         if (mds >= mdsc->max_sessions) {
480                 int newmax = 1 << get_count_order(mds+1);
481                 struct ceph_mds_session **sa;
482
483                 dout("register_session realloc to %d\n", newmax);
484                 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
485                 if (sa == NULL)
486                         goto fail_realloc;
487                 if (mdsc->sessions) {
488                         memcpy(sa, mdsc->sessions,
489                                mdsc->max_sessions * sizeof(void *));
490                         kfree(mdsc->sessions);
491                 }
492                 mdsc->sessions = sa;
493                 mdsc->max_sessions = newmax;
494         }
495         mdsc->sessions[mds] = s;
496         atomic_inc(&mdsc->num_sessions);
497         atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
498
499         ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
500                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
501
502         return s;
503
504 fail_realloc:
505         kfree(s);
506         return ERR_PTR(-ENOMEM);
507 }
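/*
 * Sizing note for the realloc above (a worked example, not original
 * text): the sessions array grows to the next power of two that can
 * hold index mds.  E.g. mds == 5 gives
 *
 *	newmax = 1 << get_count_order(5 + 1) = 1 << 3 = 8
 */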
508
509 /*
510  * called under mdsc->mutex
511  */
512 static void __unregister_session(struct ceph_mds_client *mdsc,
513                                struct ceph_mds_session *s)
514 {
515         dout("__unregister_session mds%d %p\n", s->s_mds, s);
516         BUG_ON(mdsc->sessions[s->s_mds] != s);
517         mdsc->sessions[s->s_mds] = NULL;
518         ceph_con_close(&s->s_con);
519         ceph_put_mds_session(s);
520         atomic_dec(&mdsc->num_sessions);
521 }
522
523 /*
524  * drop session refs in request.
525  *
526  * should be last request ref, or hold mdsc->mutex
527  */
528 static void put_request_session(struct ceph_mds_request *req)
529 {
530         if (req->r_session) {
531                 ceph_put_mds_session(req->r_session);
532                 req->r_session = NULL;
533         }
534 }
535
536 void ceph_mdsc_release_request(struct kref *kref)
537 {
538         struct ceph_mds_request *req = container_of(kref,
539                                                     struct ceph_mds_request,
540                                                     r_kref);
541         destroy_reply_info(&req->r_reply_info);
542         if (req->r_request)
543                 ceph_msg_put(req->r_request);
544         if (req->r_reply)
545                 ceph_msg_put(req->r_reply);
546         if (req->r_inode) {
547                 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
548                 iput(req->r_inode);
549         }
550         if (req->r_locked_dir)
551                 ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
552         iput(req->r_target_inode);
553         if (req->r_dentry)
554                 dput(req->r_dentry);
555         if (req->r_old_dentry)
556                 dput(req->r_old_dentry);
557         if (req->r_old_dentry_dir) {
558                 /*
559                  * track (and drop pins for) r_old_dentry_dir
560                  * separately, since r_old_dentry's d_parent may have
561                  * changed between the dir mutex being dropped and
562                  * this request being freed.
563                  */
564                 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
565                                   CEPH_CAP_PIN);
566                 iput(req->r_old_dentry_dir);
567         }
568         kfree(req->r_path1);
569         kfree(req->r_path2);
570         if (req->r_pagelist)
571                 ceph_pagelist_release(req->r_pagelist);
572         put_request_session(req);
573         ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
574         kfree(req);
575 }
576
577 DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
578
579 /*
580  * lookup request, bump ref if found.
581  *
582  * called under mdsc->mutex.
583  */
584 static struct ceph_mds_request *
585 lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
586 {
587         struct ceph_mds_request *req;
588
589         req = lookup_request(&mdsc->request_tree, tid);
590         if (req)
591                 ceph_mdsc_get_request(req);
592
593         return req;
594 }
595
596 /*
597  * Register an in-flight request, and assign a tid.  Link to the
598  * directory we are modifying (if any).
599  *
600  * Called under mdsc->mutex.
601  */
602 static void __register_request(struct ceph_mds_client *mdsc,
603                                struct ceph_mds_request *req,
604                                struct inode *dir)
605 {
606         req->r_tid = ++mdsc->last_tid;
607         if (req->r_num_caps)
608                 ceph_reserve_caps(mdsc, &req->r_caps_reservation,
609                                   req->r_num_caps);
610         dout("__register_request %p tid %lld\n", req, req->r_tid);
611         ceph_mdsc_get_request(req);
612         insert_request(&mdsc->request_tree, req);
613
614         req->r_uid = current_fsuid();
615         req->r_gid = current_fsgid();
616
617         if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
618                 mdsc->oldest_tid = req->r_tid;
619
620         if (dir) {
621                 ihold(dir);
622                 req->r_unsafe_dir = dir;
623         }
624 }
625
626 static void __unregister_request(struct ceph_mds_client *mdsc,
627                                  struct ceph_mds_request *req)
628 {
629         dout("__unregister_request %p tid %lld\n", req, req->r_tid);
630
631         /* Never leave an unregistered request on an unsafe list! */
632         list_del_init(&req->r_unsafe_item);
633
634         if (req->r_tid == mdsc->oldest_tid) {
635                 struct rb_node *p = rb_next(&req->r_node);
636                 mdsc->oldest_tid = 0;
637                 while (p) {
638                         struct ceph_mds_request *next_req =
639                                 rb_entry(p, struct ceph_mds_request, r_node);
640                         if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
641                                 mdsc->oldest_tid = next_req->r_tid;
642                                 break;
643                         }
644                         p = rb_next(p);
645                 }
646         }
647
648         erase_request(&mdsc->request_tree, req);
649
650         if (req->r_unsafe_dir && req->r_got_unsafe) {
651                 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
652                 spin_lock(&ci->i_unsafe_lock);
653                 list_del_init(&req->r_unsafe_dir_item);
654                 spin_unlock(&ci->i_unsafe_lock);
655         }
656         if (req->r_target_inode && req->r_got_unsafe) {
657                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
658                 spin_lock(&ci->i_unsafe_lock);
659                 list_del_init(&req->r_unsafe_target_item);
660                 spin_unlock(&ci->i_unsafe_lock);
661         }
662
663         if (req->r_unsafe_dir) {
664                 iput(req->r_unsafe_dir);
665                 req->r_unsafe_dir = NULL;
666         }
667
668         complete_all(&req->r_safe_completion);
669
670         ceph_mdsc_put_request(req);
671 }
672
673 /*
674  * Choose mds to send request to next.  If there is a hint set in the
675  * request (e.g., due to a prior forward hint from the mds), use that.
676  * Otherwise, consult frag tree and/or caps to identify the
677  * appropriate mds.  If all else fails, choose randomly.
678  *
679  * Called under mdsc->mutex.
680  */
681 static struct dentry *get_nonsnap_parent(struct dentry *dentry)
682 {
683         /*
684          * we don't need to worry about protecting the d_parent access
685          * here because we never rename inside the snapped namespace
686          * except to resplice to another snapdir, and either the old or new
687          * result is a valid result.
688          */
689         while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
690                 dentry = dentry->d_parent;
691         return dentry;
692 }
693
694 static int __choose_mds(struct ceph_mds_client *mdsc,
695                         struct ceph_mds_request *req)
696 {
697         struct inode *inode;
698         struct ceph_inode_info *ci;
699         struct ceph_cap *cap;
700         int mode = req->r_direct_mode;
701         int mds = -1;
702         u32 hash = req->r_direct_hash;
703         bool is_hash = req->r_direct_is_hash;
704
705         /*
706          * is there a specific mds we should try?  ignore hint if we have
707          * no session and the mds is not up (active or recovering).
708          */
709         if (req->r_resend_mds >= 0 &&
710             (__have_session(mdsc, req->r_resend_mds) ||
711              ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
712                 dout("choose_mds using resend_mds mds%d\n",
713                      req->r_resend_mds);
714                 return req->r_resend_mds;
715         }
716
717         if (mode == USE_RANDOM_MDS)
718                 goto random;
719
720         inode = NULL;
721         if (req->r_inode) {
722                 inode = req->r_inode;
723         } else if (req->r_dentry) {
724                 /* ignore race with rename; old or new d_parent is okay */
725                 struct dentry *parent = req->r_dentry->d_parent;
726                 struct inode *dir = d_inode(parent);
727
728                 if (dir->i_sb != mdsc->fsc->sb) {
729                         /* not this fs! */
730                         inode = d_inode(req->r_dentry);
731                 } else if (ceph_snap(dir) != CEPH_NOSNAP) {
732                         /* direct snapped/virtual snapdir requests
733                          * based on parent dir inode */
734                         struct dentry *dn = get_nonsnap_parent(parent);
735                         inode = d_inode(dn);
736                         dout("__choose_mds using nonsnap parent %p\n", inode);
737                 } else {
738                         /* dentry target */
739                         inode = d_inode(req->r_dentry);
740                         if (!inode || mode == USE_AUTH_MDS) {
741                                 /* dir + name */
742                                 inode = dir;
743                                 hash = ceph_dentry_hash(dir, req->r_dentry);
744                                 is_hash = true;
745                         }
746                 }
747         }
748
749         dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
750              (int)hash, mode);
751         if (!inode)
752                 goto random;
753         ci = ceph_inode(inode);
754
755         if (is_hash && S_ISDIR(inode->i_mode)) {
756                 struct ceph_inode_frag frag;
757                 int found;
758
759                 ceph_choose_frag(ci, hash, &frag, &found);
760                 if (found) {
761                         if (mode == USE_ANY_MDS && frag.ndist > 0) {
762                                 u8 r;
763
764                                 /* choose a random replica */
765                                 get_random_bytes(&r, 1);
766                                 r %= frag.ndist;
767                                 mds = frag.dist[r];
768                                 dout("choose_mds %p %llx.%llx "
769                                      "frag %u mds%d (%d/%d)\n",
770                                      inode, ceph_vinop(inode),
771                                      frag.frag, mds,
772                                      (int)r, frag.ndist);
773                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
774                                     CEPH_MDS_STATE_ACTIVE)
775                                         return mds;
776                         }
777
778                         /* since this file/dir wasn't known to be
779                          * replicated, then we want to look for the
780                          * authoritative mds. */
781                         mode = USE_AUTH_MDS;
782                         if (frag.mds >= 0) {
783                                 /* choose auth mds */
784                                 mds = frag.mds;
785                                 dout("choose_mds %p %llx.%llx "
786                                      "frag %u mds%d (auth)\n",
787                                      inode, ceph_vinop(inode), frag.frag, mds);
788                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
789                                     CEPH_MDS_STATE_ACTIVE)
790                                         return mds;
791                         }
792                 }
793         }
794
795         spin_lock(&ci->i_ceph_lock);
796         cap = NULL;
797         if (mode == USE_AUTH_MDS)
798                 cap = ci->i_auth_cap;
799         if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
800                 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
801         if (!cap) {
802                 spin_unlock(&ci->i_ceph_lock);
803                 goto random;
804         }
805         mds = cap->session->s_mds;
806         dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
807              inode, ceph_vinop(inode), mds,
808              cap == ci->i_auth_cap ? "auth " : "", cap);
809         spin_unlock(&ci->i_ceph_lock);
810         return mds;
811
812 random:
813         mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
814         dout("choose_mds chose random mds%d\n", mds);
815         return mds;
816 }
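/*
 * Illustrative caller pattern (a sketch; the real dispatch lives in
 * __do_request(), later in this file): pick an mds, then find or
 * create the matching session.
 *
 *	int mds = __choose_mds(mdsc, req);
 *	struct ceph_mds_session *session;
 *
 *	if (mds >= 0) {
 *		session = __ceph_lookup_mds_session(mdsc, mds);
 *		if (!session)
 *			session = register_session(mdsc, mds);
 *	}
 */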
817
818
819 /*
820  * session messages
821  */
822 static struct ceph_msg *create_session_msg(u32 op, u64 seq)
823 {
824         struct ceph_msg *msg;
825         struct ceph_mds_session_head *h;
826
827         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
828                            false);
829         if (!msg) {
830                 pr_err("create_session_msg ENOMEM creating msg\n");
831                 return NULL;
832         }
833         h = msg->front.iov_base;
834         h->op = cpu_to_le32(op);
835         h->seq = cpu_to_le64(seq);
836
837         return msg;
838 }
839
840 /*
841  * session message, specialization for CEPH_SESSION_REQUEST_OPEN
842  * to include additional client metadata fields.
843  */
844 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
845 {
846         struct ceph_msg *msg;
847         struct ceph_mds_session_head *h;
848         int i = -1;
849         int metadata_bytes = 0;
850         int metadata_key_count = 0;
851         struct ceph_options *opt = mdsc->fsc->client->options;
852         struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
853         void *p;
854
855         const char* metadata[][2] = {
856                 {"hostname", utsname()->nodename},
857                 {"kernel_version", utsname()->release},
858                 {"entity_id", opt->name ? : ""},
859                 {"root", fsopt->server_path ? : "/"},
860                 {NULL, NULL}
861         };
862
863         /* Calculate serialized length of metadata */
864         metadata_bytes = 4;  /* map length */
865         for (i = 0; metadata[i][0] != NULL; ++i) {
866                 metadata_bytes += 8 + strlen(metadata[i][0]) +
867                         strlen(metadata[i][1]);
868                 metadata_key_count++;
869         }
870
871         /* Allocate the message */
872         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
873                            GFP_NOFS, false);
874         if (!msg) {
875                 pr_err("create_session_msg ENOMEM creating msg\n");
876                 return NULL;
877         }
878         h = msg->front.iov_base;
879         h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
880         h->seq = cpu_to_le64(seq);
881
882         /*
883          * Serialize client metadata into waiting buffer space, using
884          * the format that userspace expects for map<string, string>
885          *
886          * ClientSession messages with metadata are v2
887          */
888         msg->hdr.version = cpu_to_le16(2);
889         msg->hdr.compat_version = cpu_to_le16(1);
890
891         /* The write pointer, following the session_head structure */
892         p = msg->front.iov_base + sizeof(*h);
893
894         /* Number of entries in the map */
895         ceph_encode_32(&p, metadata_key_count);
896
897         /* Two length-prefixed strings for each entry in the map */
898         for (i = 0; metadata[i][0] != NULL; ++i) {
899                 size_t const key_len = strlen(metadata[i][0]);
900                 size_t const val_len = strlen(metadata[i][1]);
901
902                 ceph_encode_32(&p, key_len);
903                 memcpy(p, metadata[i][0], key_len);
904                 p += key_len;
905                 ceph_encode_32(&p, val_len);
906                 memcpy(p, metadata[i][1], val_len);
907                 p += val_len;
908         }
909
910         return msg;
911 }
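/*
 * Hedged sketch of the map<string,string> wire format produced above,
 * matching the layout the encoding loop emits:
 *
 *	u32 n;					// entry count
 *	// repeated n times:
 *	u32 key_len;  u8 key[key_len];
 *	u32 val_len;  u8 val[val_len];
 *
 * e.g. a single {"hostname": "node1"} entry encodes (little endian) as
 *	01 00 00 00  08 00 00 00 "hostname"  05 00 00 00 "node1"
 */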
912
913 /*
914  * send session open request.
915  *
916  * called under mdsc->mutex
917  */
918 static int __open_session(struct ceph_mds_client *mdsc,
919                           struct ceph_mds_session *session)
920 {
921         struct ceph_msg *msg;
922         int mstate;
923         int mds = session->s_mds;
924
925         /* wait for mds to go active? */
926         mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
927         dout("open_session to mds%d (%s)\n", mds,
928              ceph_mds_state_name(mstate));
929         session->s_state = CEPH_MDS_SESSION_OPENING;
930         session->s_renew_requested = jiffies;
931
932         /* send connect message */
933         msg = create_session_open_msg(mdsc, session->s_seq);
934         if (!msg)
935                 return -ENOMEM;
936         ceph_con_send(&session->s_con, msg);
937         return 0;
938 }
939
940 /*
941  * open sessions for any export targets for the given mds
942  *
943  * called under mdsc->mutex
944  */
945 static struct ceph_mds_session *
946 __open_export_target_session(struct ceph_mds_client *mdsc, int target)
947 {
948         struct ceph_mds_session *session;
949
950         session = __ceph_lookup_mds_session(mdsc, target);
951         if (!session) {
952                 session = register_session(mdsc, target);
953                 if (IS_ERR(session))
954                         return session;
955         }
956         if (session->s_state == CEPH_MDS_SESSION_NEW ||
957             session->s_state == CEPH_MDS_SESSION_CLOSING)
958                 __open_session(mdsc, session);
959
960         return session;
961 }
962
963 struct ceph_mds_session *
964 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
965 {
966         struct ceph_mds_session *session;
967
968         dout("open_export_target_session to mds%d\n", target);
969
970         mutex_lock(&mdsc->mutex);
971         session = __open_export_target_session(mdsc, target);
972         mutex_unlock(&mdsc->mutex);
973
974         return session;
975 }
976
977 static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
978                                           struct ceph_mds_session *session)
979 {
980         struct ceph_mds_info *mi;
981         struct ceph_mds_session *ts;
982         int i, mds = session->s_mds;
983
984         if (mds >= mdsc->mdsmap->m_max_mds)
985                 return;
986
987         mi = &mdsc->mdsmap->m_info[mds];
988         dout("open_export_target_sessions for mds%d (%d targets)\n",
989              session->s_mds, mi->num_export_targets);
990
991         for (i = 0; i < mi->num_export_targets; i++) {
992                 ts = __open_export_target_session(mdsc, mi->export_targets[i]);
993                 if (!IS_ERR(ts))
994                         ceph_put_mds_session(ts);
995         }
996 }
997
998 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
999                                            struct ceph_mds_session *session)
1000 {
1001         mutex_lock(&mdsc->mutex);
1002         __open_export_target_sessions(mdsc, session);
1003         mutex_unlock(&mdsc->mutex);
1004 }
1005
1006 /*
1007  * session caps
1008  */
1009
1010 /* caller holds s_cap_lock, we drop it */
1011 static void cleanup_cap_releases(struct ceph_mds_client *mdsc,
1012                                  struct ceph_mds_session *session)
1013         __releases(session->s_cap_lock)
1014 {
1015         LIST_HEAD(tmp_list);
1016         list_splice_init(&session->s_cap_releases, &tmp_list);
1017         session->s_num_cap_releases = 0;
1018         spin_unlock(&session->s_cap_lock);
1019
1020         dout("cleanup_cap_releases mds%d\n", session->s_mds);
1021         while (!list_empty(&tmp_list)) {
1022                 struct ceph_cap *cap;
1023                 /* put each cap that was queued for release */
1024                 cap = list_first_entry(&tmp_list,
1025                                         struct ceph_cap, session_caps);
1026                 list_del(&cap->session_caps);
1027                 ceph_put_cap(mdsc, cap);
1028         }
1029 }
1030
1031 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1032                                      struct ceph_mds_session *session)
1033 {
1034         struct ceph_mds_request *req;
1035         struct rb_node *p;
1036
1037         dout("cleanup_session_requests mds%d\n", session->s_mds);
1038         mutex_lock(&mdsc->mutex);
1039         while (!list_empty(&session->s_unsafe)) {
1040                 req = list_first_entry(&session->s_unsafe,
1041                                        struct ceph_mds_request, r_unsafe_item);
1042                 pr_warn_ratelimited(" dropping unsafe request %llu\n",
1043                                     req->r_tid);
1044                 __unregister_request(mdsc, req);
1045         }
1046         /* zero r_attempts, so kick_requests() will re-send requests */
1047         p = rb_first(&mdsc->request_tree);
1048         while (p) {
1049                 req = rb_entry(p, struct ceph_mds_request, r_node);
1050                 p = rb_next(p);
1051                 if (req->r_session &&
1052                     req->r_session->s_mds == session->s_mds)
1053                         req->r_attempts = 0;
1054         }
1055         mutex_unlock(&mdsc->mutex);
1056 }
1057
1058 /*
1059  * Helper to safely iterate over all caps associated with a session, with
1060  * special care taken to handle a racing __ceph_remove_cap().
1061  *
1062  * Caller must hold session s_mutex.
1063  */
1064 static int iterate_session_caps(struct ceph_mds_session *session,
1065                                  int (*cb)(struct inode *, struct ceph_cap *,
1066                                             void *), void *arg)
1067 {
1068         struct list_head *p;
1069         struct ceph_cap *cap;
1070         struct inode *inode, *last_inode = NULL;
1071         struct ceph_cap *old_cap = NULL;
1072         int ret;
1073
1074         dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
1075         spin_lock(&session->s_cap_lock);
1076         p = session->s_caps.next;
1077         while (p != &session->s_caps) {
1078                 cap = list_entry(p, struct ceph_cap, session_caps);
1079                 inode = igrab(&cap->ci->vfs_inode);
1080                 if (!inode) {
1081                         p = p->next;
1082                         continue;
1083                 }
1084                 session->s_cap_iterator = cap;
1085                 spin_unlock(&session->s_cap_lock);
1086
1087                 if (last_inode) {
1088                         iput(last_inode);
1089                         last_inode = NULL;
1090                 }
1091                 if (old_cap) {
1092                         ceph_put_cap(session->s_mdsc, old_cap);
1093                         old_cap = NULL;
1094                 }
1095
1096                 ret = cb(inode, cap, arg);
1097                 last_inode = inode;
1098
1099                 spin_lock(&session->s_cap_lock);
1100                 p = p->next;
1101                 if (cap->ci == NULL) {
1102                         dout("iterate_session_caps  finishing cap %p removal\n",
1103                              cap);
1104                         BUG_ON(cap->session != session);
1105                         cap->session = NULL;
1106                         list_del_init(&cap->session_caps);
1107                         session->s_nr_caps--;
1108                         if (cap->queue_release) {
1109                                 list_add_tail(&cap->session_caps,
1110                                               &session->s_cap_releases);
1111                                 session->s_num_cap_releases++;
1112                         } else {
1113                                 old_cap = cap;  /* put_cap it w/o locks held */
1114                         }
1115                 }
1116                 if (ret < 0)
1117                         goto out;
1118         }
1119         ret = 0;
1120 out:
1121         session->s_cap_iterator = NULL;
1122         spin_unlock(&session->s_cap_lock);
1123
1124         iput(last_inode);
1125         if (old_cap)
1126                 ceph_put_cap(session->s_mdsc, old_cap);
1127
1128         return ret;
1129 }
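/*
 * Illustrative use of the iterator above: callbacks take (inode, cap,
 * arg) and return a negative value to stop early.  The *_cb helpers
 * below all follow this shape; my_cap_cb here is a hypothetical name.
 *
 *	static int my_cap_cb(struct inode *inode, struct ceph_cap *cap,
 *			     void *arg)
 *	{
 *		// inode holds an igrab() reference for the callback
 *		return 0;	// 0 = keep iterating
 *	}
 *
 *	iterate_session_caps(session, my_cap_cb, NULL);
 */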
1130
1131 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1132                                   void *arg)
1133 {
1134         struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
1135         struct ceph_inode_info *ci = ceph_inode(inode);
1136         LIST_HEAD(to_remove);
1137         bool drop = false;
1138         bool invalidate = false;
1139
1140         dout("removing cap %p, ci is %p, inode is %p\n",
1141              cap, ci, &ci->vfs_inode);
1142         spin_lock(&ci->i_ceph_lock);
1143         __ceph_remove_cap(cap, false);
1144         if (!ci->i_auth_cap) {
1145                 struct ceph_cap_flush *cf;
1146                 struct ceph_mds_client *mdsc = fsc->mdsc;
1147
1148                 ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
1149
1150                 if (ci->i_wrbuffer_ref > 0 &&
1151                     ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
1152                         invalidate = true;
1153
1154                 while (!list_empty(&ci->i_cap_flush_list)) {
1155                         cf = list_first_entry(&ci->i_cap_flush_list,
1156                                               struct ceph_cap_flush, i_list);
1157                         list_move(&cf->i_list, &to_remove);
1158                 }
1159
1160                 spin_lock(&mdsc->cap_dirty_lock);
1161
1162                 list_for_each_entry(cf, &to_remove, i_list)
1163                         list_del(&cf->g_list);
1164
1165                 if (!list_empty(&ci->i_dirty_item)) {
1166                         pr_warn_ratelimited(
1167                                 " dropping dirty %s state for %p %lld\n",
1168                                 ceph_cap_string(ci->i_dirty_caps),
1169                                 inode, ceph_ino(inode));
1170                         ci->i_dirty_caps = 0;
1171                         list_del_init(&ci->i_dirty_item);
1172                         drop = true;
1173                 }
1174                 if (!list_empty(&ci->i_flushing_item)) {
1175                         pr_warn_ratelimited(
1176                                 " dropping dirty+flushing %s state for %p %lld\n",
1177                                 ceph_cap_string(ci->i_flushing_caps),
1178                                 inode, ceph_ino(inode));
1179                         ci->i_flushing_caps = 0;
1180                         list_del_init(&ci->i_flushing_item);
1181                         mdsc->num_cap_flushing--;
1182                         drop = true;
1183                 }
1184                 spin_unlock(&mdsc->cap_dirty_lock);
1185
1186                 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
1187                         list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
1188                         ci->i_prealloc_cap_flush = NULL;
1189                 }
1190         }
1191         spin_unlock(&ci->i_ceph_lock);
1192         while (!list_empty(&to_remove)) {
1193                 struct ceph_cap_flush *cf;
1194                 cf = list_first_entry(&to_remove,
1195                                       struct ceph_cap_flush, i_list);
1196                 list_del(&cf->i_list);
1197                 ceph_free_cap_flush(cf);
1198         }
1199
1200         wake_up_all(&ci->i_cap_wq);
1201         if (invalidate)
1202                 ceph_queue_invalidate(inode);
1203         if (drop)
1204                 iput(inode);
1205         return 0;
1206 }
1207
1208 /*
1209  * caller must hold session s_mutex
1210  */
1211 static void remove_session_caps(struct ceph_mds_session *session)
1212 {
1213         struct ceph_fs_client *fsc = session->s_mdsc->fsc;
1214         struct super_block *sb = fsc->sb;
1215         dout("remove_session_caps on %p\n", session);
1216         iterate_session_caps(session, remove_session_caps_cb, fsc);
1217
1218         wake_up_all(&fsc->mdsc->cap_flushing_wq);
1219
1220         spin_lock(&session->s_cap_lock);
1221         if (session->s_nr_caps > 0) {
1222                 struct inode *inode;
1223                 struct ceph_cap *cap, *prev = NULL;
1224                 struct ceph_vino vino;
1225                 /*
1226                  * iterate_session_caps() skips inodes that are being
1227                  * deleted, we need to wait until deletions are complete.
1228                  * __wait_on_freeing_inode() is designed for the job,
1229                  * but it is not exported, so use lookup inode function
1230                  * to access it.
1231                  */
1232                 while (!list_empty(&session->s_caps)) {
1233                         cap = list_entry(session->s_caps.next,
1234                                          struct ceph_cap, session_caps);
1235                         if (cap == prev)
1236                                 break;
1237                         prev = cap;
1238                         vino = cap->ci->i_vino;
1239                         spin_unlock(&session->s_cap_lock);
1240
1241                         inode = ceph_find_inode(sb, vino);
1242                         iput(inode);
1243
1244                         spin_lock(&session->s_cap_lock);
1245                 }
1246         }
1247
1248         /* drop cap expires and unlock s_cap_lock */
1249         cleanup_cap_releases(session->s_mdsc, session);
1250
1251         BUG_ON(session->s_nr_caps > 0);
1252         BUG_ON(!list_empty(&session->s_cap_flushing));
1253 }
1254
1255 /*
1256  * wake up any threads waiting on this session's caps.  if we are
1257  * reconnecting, also reset per-inode max_size state so it is resent.
1258  *
1259  * caller must hold s_mutex.
1260  */
1261 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
1262                               void *arg)
1263 {
1264         struct ceph_inode_info *ci = ceph_inode(inode);
1265
1266         if (arg) {
1267                 spin_lock(&ci->i_ceph_lock);
1268                 ci->i_wanted_max_size = 0;
1269                 ci->i_requested_max_size = 0;
1270                 spin_unlock(&ci->i_ceph_lock);
1271         }
1272         wake_up_all(&ci->i_cap_wq);
1273         return 0;
1274 }
1275
1276 static void wake_up_session_caps(struct ceph_mds_session *session,
1277                                  int reconnect)
1278 {
1279         dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
1280         iterate_session_caps(session, wake_up_session_cb,
1281                              (void *)(unsigned long)reconnect);
1282 }
1283
1284 /*
1285  * Send periodic message to MDS renewing all currently held caps.  The
1286  * ack will reset the expiration for all caps from this session.
1287  *
1288  * caller holds s_mutex
1289  */
1290 static int send_renew_caps(struct ceph_mds_client *mdsc,
1291                            struct ceph_mds_session *session)
1292 {
1293         struct ceph_msg *msg;
1294         int state;
1295
1296         if (time_after_eq(jiffies, session->s_cap_ttl) &&
1297             time_after_eq(session->s_cap_ttl, session->s_renew_requested))
1298                 pr_info("mds%d caps stale\n", session->s_mds);
1299         session->s_renew_requested = jiffies;
1300
1301         /* do not try to renew caps until a recovering mds has reconnected
1302          * with its clients. */
1303         state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
1304         if (state < CEPH_MDS_STATE_RECONNECT) {
1305                 dout("send_renew_caps ignoring mds%d (%s)\n",
1306                      session->s_mds, ceph_mds_state_name(state));
1307                 return 0;
1308         }
1309
1310         dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
1311                 ceph_mds_state_name(state));
1312         msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
1313                                  ++session->s_renew_seq);
1314         if (!msg)
1315                 return -ENOMEM;
1316         ceph_con_send(&session->s_con, msg);
1317         return 0;
1318 }
1319
1320 static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
1321                              struct ceph_mds_session *session, u64 seq)
1322 {
1323         struct ceph_msg *msg;
1324
1325         dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
1326              session->s_mds, ceph_session_state_name(session->s_state), seq);
1327         msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
1328         if (!msg)
1329                 return -ENOMEM;
1330         ceph_con_send(&session->s_con, msg);
1331         return 0;
1332 }
1333
1334
1335 /*
1336  * Note new cap ttl, and any transition from stale -> not stale (fresh?).
1337  *
1338  * Called under session->s_mutex
1339  */
1340 static void renewed_caps(struct ceph_mds_client *mdsc,
1341                          struct ceph_mds_session *session, int is_renew)
1342 {
1343         int was_stale;
1344         int wake = 0;
1345
1346         spin_lock(&session->s_cap_lock);
1347         was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
1348
1349         session->s_cap_ttl = session->s_renew_requested +
1350                 mdsc->mdsmap->m_session_timeout*HZ;
1351
1352         if (was_stale) {
1353                 if (time_before(jiffies, session->s_cap_ttl)) {
1354                         pr_info("mds%d caps renewed\n", session->s_mds);
1355                         wake = 1;
1356                 } else {
1357                         pr_info("mds%d caps still stale\n", session->s_mds);
1358                 }
1359         }
1360         dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1361              session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1362              time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
1363         spin_unlock(&session->s_cap_lock);
1364
1365         if (wake)
1366                 wake_up_session_caps(session, 0);
1367 }
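/*
 * Worked example for the ttl computed above (illustrative values): if
 * the mdsmap session timeout is the common 60s default and a renew was
 * requested at jiffies J, then
 *
 *	s_cap_ttl = J + 60 * HZ
 *
 * so the caps stay fresh for 60 seconds unless renewed again.
 */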
1368
1369 /*
1370  * send a session close request
1371  */
1372 static int request_close_session(struct ceph_mds_client *mdsc,
1373                                  struct ceph_mds_session *session)
1374 {
1375         struct ceph_msg *msg;
1376
1377         dout("request_close_session mds%d state %s seq %lld\n",
1378              session->s_mds, ceph_session_state_name(session->s_state),
1379              session->s_seq);
1380         msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
1381         if (!msg)
1382                 return -ENOMEM;
1383         ceph_con_send(&session->s_con, msg);
1384         return 1;
1385 }
1386
1387 /*
1388  * Called with s_mutex held.
1389  */
1390 static int __close_session(struct ceph_mds_client *mdsc,
1391                          struct ceph_mds_session *session)
1392 {
1393         if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1394                 return 0;
1395         session->s_state = CEPH_MDS_SESSION_CLOSING;
1396         return request_close_session(mdsc, session);
1397 }
1398
1399 /*
1400  * Trim old(er) caps.
1401  *
1402  * Because we can't cache an inode without one or more caps, we do
1403  * this indirectly: if a cap is unused, we prune its aliases, at which
1404  * point the inode will hopefully get dropped too.
1405  *
1406  * Yes, this is a bit sloppy.  Our only real goal here is to respond to
1407  * memory pressure from the MDS, though, so it needn't be perfect.
1408  */
1409 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1410 {
1411         struct ceph_mds_session *session = arg;
1412         struct ceph_inode_info *ci = ceph_inode(inode);
1413         int used, wanted, oissued, mine;
1414
1415         if (session->s_trim_caps <= 0)
1416                 return -1;
1417
1418         spin_lock(&ci->i_ceph_lock);
1419         mine = cap->issued | cap->implemented;
1420         used = __ceph_caps_used(ci);
1421         wanted = __ceph_caps_file_wanted(ci);
1422         oissued = __ceph_caps_issued_other(ci, cap);
1423
1424         dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1425              inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1426              ceph_cap_string(used), ceph_cap_string(wanted));
1427         if (cap == ci->i_auth_cap) {
1428                 if (ci->i_dirty_caps || ci->i_flushing_caps ||
1429                     !list_empty(&ci->i_cap_snaps))
1430                         goto out;
1431                 if ((used | wanted) & CEPH_CAP_ANY_WR)
1432                         goto out;
1433         }
1434         /* The inode has cached pages, but it's no longer used.
1435          * We can safely drop it. */
1436         if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
1437             !(oissued & CEPH_CAP_FILE_CACHE)) {
1438                 used = 0;
1439                 oissued = 0;
1440         }
1441         if ((used | wanted) & ~oissued & mine)
1442                 goto out;   /* we need these caps */
1443
1444         session->s_trim_caps--;
1445         if (oissued) {
1446                 /* we aren't the only cap.. just remove us */
1447                 __ceph_remove_cap(cap, true);
1448         } else {
1449                 /* try dropping referring dentries */
1450                 spin_unlock(&ci->i_ceph_lock);
1451                 d_prune_aliases(inode);
1452                 dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
1453                      inode, cap, atomic_read(&inode->i_count));
1454                 return 0;
1455         }
1456
1457 out:
1458         spin_unlock(&ci->i_ceph_lock);
1459         return 0;
1460 }
1461
1462 /*
1463  * Trim session cap count down to some max number.
1464  */
1465 static int trim_caps(struct ceph_mds_client *mdsc,
1466                      struct ceph_mds_session *session,
1467                      int max_caps)
1468 {
1469         int trim_caps = session->s_nr_caps - max_caps;
1470
1471         dout("trim_caps mds%d start: %d / %d, trim %d\n",
1472              session->s_mds, session->s_nr_caps, max_caps, trim_caps);
1473         if (trim_caps > 0) {
1474                 session->s_trim_caps = trim_caps;
1475                 iterate_session_caps(session, trim_caps_cb, session);
1476                 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
1477                      session->s_mds, session->s_nr_caps, max_caps,
1478                         trim_caps - session->s_trim_caps);
1479                 session->s_trim_caps = 0;
1480         }
1481
1482         ceph_send_cap_releases(mdsc, session);
1483         return 0;
1484 }
1485
1486 static int check_caps_flush(struct ceph_mds_client *mdsc,
1487                             u64 want_flush_tid)
1488 {
1489         int ret = 1;
1490
1491         spin_lock(&mdsc->cap_dirty_lock);
1492         if (!list_empty(&mdsc->cap_flush_list)) {
1493                 struct ceph_cap_flush *cf =
1494                         list_first_entry(&mdsc->cap_flush_list,
1495                                          struct ceph_cap_flush, g_list);
1496                 if (cf->tid <= want_flush_tid) {
1497                         dout("check_caps_flush still flushing tid "
1498                              "%llu <= %llu\n", cf->tid, want_flush_tid);
1499                         ret = 0;
1500                 }
1501         }
1502         spin_unlock(&mdsc->cap_dirty_lock);
1503         return ret;
1504 }
1505
1506 /*
1507  * wait for all dirty cap data to be flushed to disk.
1508  *
1509  * returns once we've flushed through want_flush_tid
1510  */
1511 static void wait_caps_flush(struct ceph_mds_client *mdsc,
1512                             u64 want_flush_tid)
1513 {
1514         dout("check_caps_flush want %llu\n", want_flush_tid);
1515
1516         wait_event(mdsc->cap_flushing_wq,
1517                    check_caps_flush(mdsc, want_flush_tid));
1518
1519         dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
1520 }
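
/*
 * Usage sketch for the flush-wait pair above (hedged; the caller shown
 * is an assumption based on how the sync path typically uses it):
 *
 *   u64 want_tid;
 *
 *   spin_lock(&mdsc->cap_dirty_lock);
 *   want_tid = mdsc->last_cap_flush_tid;   // highest tid handed out
 *   spin_unlock(&mdsc->cap_dirty_lock);
 *   wait_caps_flush(mdsc, want_tid);
 *
 * wait_event() re-evaluates check_caps_flush() each time
 * mdsc->cap_flushing_wq is woken, and returns once no entry on
 * mdsc->cap_flush_list has tid <= want_tid.
 */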
1521
1522 /*
1523  * called under s_mutex
1524  */
1525 void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
1526                             struct ceph_mds_session *session)
1527 {
1528         struct ceph_msg *msg = NULL;
1529         struct ceph_mds_cap_release *head;
1530         struct ceph_mds_cap_item *item;
1531         struct ceph_cap *cap;
1532         LIST_HEAD(tmp_list);
1533         int num_cap_releases;
1534
1535         spin_lock(&session->s_cap_lock);
1536 again:
1537         list_splice_init(&session->s_cap_releases, &tmp_list);
1538         num_cap_releases = session->s_num_cap_releases;
1539         session->s_num_cap_releases = 0;
1540         spin_unlock(&session->s_cap_lock);
1541
1542         while (!list_empty(&tmp_list)) {
1543                 if (!msg) {
1544                         msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
1545                                         PAGE_SIZE, GFP_NOFS, false);
1546                         if (!msg)
1547                                 goto out_err;
1548                         head = msg->front.iov_base;
1549                         head->num = cpu_to_le32(0);
1550                         msg->front.iov_len = sizeof(*head);
1551                 }
1552                 cap = list_first_entry(&tmp_list, struct ceph_cap,
1553                                         session_caps);
1554                 list_del(&cap->session_caps);
1555                 num_cap_releases--;
1556
1557                 head = msg->front.iov_base;
1558                 le32_add_cpu(&head->num, 1);
1559                 item = msg->front.iov_base + msg->front.iov_len;
1560                 item->ino = cpu_to_le64(cap->cap_ino);
1561                 item->cap_id = cpu_to_le64(cap->cap_id);
1562                 item->migrate_seq = cpu_to_le32(cap->mseq);
1563                 item->seq = cpu_to_le32(cap->issue_seq);
1564                 msg->front.iov_len += sizeof(*item);
1565
1566                 ceph_put_cap(mdsc, cap);
1567
1568                 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
1569                         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1570                         dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1571                         ceph_con_send(&session->s_con, msg);
1572                         msg = NULL;
1573                 }
1574         }
1575
1576         BUG_ON(num_cap_releases != 0);
1577
1578         spin_lock(&session->s_cap_lock);
1579         if (!list_empty(&session->s_cap_releases))
1580                 goto again;
1581         spin_unlock(&session->s_cap_lock);
1582
1583         if (msg) {
1584                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1585                 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1586                 ceph_con_send(&session->s_con, msg);
1587         }
1588         return;
1589 out_err:
1590         pr_err("send_cap_releases mds%d, failed to allocate message\n",
1591                 session->s_mds);
1592         spin_lock(&session->s_cap_lock);
1593         list_splice(&tmp_list, &session->s_cap_releases);
1594         session->s_num_cap_releases += num_cap_releases;
1595         spin_unlock(&session->s_cap_lock);
1596 }
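
/*
 * Wire layout of each CAPRELEASE message assembled above (one
 * PAGE_SIZE front buffer):
 *
 *   struct ceph_mds_cap_release head;    // head.num = N
 *   struct ceph_mds_cap_item   item[N];  // ino, cap_id, migrate_seq, seq
 *
 * A message is sent as soon as N reaches CEPH_CAPS_PER_RELEASE, and
 * the "again" loop re-splices s_cap_releases to pick up caps that were
 * queued while s_cap_lock was dropped.
 */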
1597
1598 /*
1599  * requests
1600  */
1601
1602 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
1603                                     struct inode *dir)
1604 {
1605         struct ceph_inode_info *ci = ceph_inode(dir);
1606         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1607         struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
1608         size_t size = sizeof(struct ceph_mds_reply_dir_entry);
1609         int order, num_entries;
1610
1611         spin_lock(&ci->i_ceph_lock);
1612         num_entries = ci->i_files + ci->i_subdirs;
1613         spin_unlock(&ci->i_ceph_lock);
1614         num_entries = max(num_entries, 1);
1615         num_entries = min(num_entries, opt->max_readdir);
1616
1617         order = get_order(size * num_entries);
1618         while (order >= 0) {
1619                 rinfo->dir_entries = (void *)__get_free_pages(GFP_KERNEL |
1620                                                              __GFP_NOWARN,
1621                                                              order);
1622                 if (rinfo->dir_entries)
1623                         break;
1624                 order--;
1625         }
1626         if (!rinfo->dir_entries)
1627                 return -ENOMEM;
1628
1629         num_entries = (PAGE_SIZE << order) / size;
1630         num_entries = min(num_entries, opt->max_readdir);
1631
1632         rinfo->dir_buf_size = PAGE_SIZE << order;
1633         req->r_num_caps = num_entries + 1;
1634         req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
1635         req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
1636         return 0;
1637 }
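
/*
 * Worked example of the fallback loop above (the sizes are
 * illustrative assumptions, not taken from the structures): with
 * 4 KiB pages and a dir_entry size such that num_entries entries need
 * an order-3 (8 page) allocation, a failed order-3 attempt is retried
 * at order 2, 1 and then 0 before giving up with -ENOMEM.  Whatever
 * order succeeds, num_entries is recomputed as
 * (PAGE_SIZE << order) / size, so the request only asks for as many
 * entries as the buffer can actually hold.
 */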
1638
1639 /*
1640  * Create an mds request.
1641  */
1642 struct ceph_mds_request *
1643 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1644 {
1645         struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
1646
1647         if (!req)
1648                 return ERR_PTR(-ENOMEM);
1649
1650         mutex_init(&req->r_fill_mutex);
1651         req->r_mdsc = mdsc;
1652         req->r_started = jiffies;
1653         req->r_resend_mds = -1;
1654         INIT_LIST_HEAD(&req->r_unsafe_dir_item);
1655         INIT_LIST_HEAD(&req->r_unsafe_target_item);
1656         req->r_fmode = -1;
1657         kref_init(&req->r_kref);
1658         RB_CLEAR_NODE(&req->r_node);
1659         INIT_LIST_HEAD(&req->r_wait);
1660         init_completion(&req->r_completion);
1661         init_completion(&req->r_safe_completion);
1662         INIT_LIST_HEAD(&req->r_unsafe_item);
1663
1664         req->r_stamp = current_fs_time(mdsc->fsc->sb);
1665
1666         req->r_op = op;
1667         req->r_direct_mode = mode;
1668         return req;
1669 }
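
/*
 * Typical caller pattern, as a hedged sketch (modelled on the users in
 * dir.c and file.c; the op and field choices here are only an example):
 *
 *   req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP,
 *                                  USE_ANY_MDS);
 *   if (IS_ERR(req))
 *           return PTR_ERR(req);
 *   req->r_dentry = dget(dentry);
 *   req->r_num_caps = 2;
 *   err = ceph_mdsc_do_request(mdsc, dir, req);
 *   ceph_mdsc_put_request(req);
 */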
1670
1671 /*
1672  * Return the oldest (lowest tid) request in the request tree, or NULL if none.
1673  *
1674  * called under mdsc->mutex.
1675  */
1676 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
1677 {
1678         if (RB_EMPTY_ROOT(&mdsc->request_tree))
1679                 return NULL;
1680         return rb_entry(rb_first(&mdsc->request_tree),
1681                         struct ceph_mds_request, r_node);
1682 }
1683
1684 static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
1685 {
1686         return mdsc->oldest_tid;
1687 }
1688
1689 /*
1690  * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
1691  * on build_path_from_dentry in fs/cifs/dir.c.
1692  *
1693  * If @stop_on_nosnap, generate path relative to the first non-snapped
1694  * inode.
1695  *
1696  * Encode hidden .snap dirs as a double /, i.e.
1697  *   foo/.snap/bar -> foo//bar
1698  */
1699 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
1700                            int stop_on_nosnap)
1701 {
1702         struct dentry *temp;
1703         char *path;
1704         int len, pos;
1705         unsigned seq;
1706
1707         if (dentry == NULL)
1708                 return ERR_PTR(-EINVAL);
1709
1710 retry:
1711         len = 0;
1712         seq = read_seqbegin(&rename_lock);
1713         rcu_read_lock();
1714         for (temp = dentry; !IS_ROOT(temp);) {
1715                 struct inode *inode = d_inode(temp);
1716                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
1717                         len++;  /* slash only */
1718                 else if (stop_on_nosnap && inode &&
1719                          ceph_snap(inode) == CEPH_NOSNAP)
1720                         break;
1721                 else
1722                         len += 1 + temp->d_name.len;
1723                 temp = temp->d_parent;
1724         }
1725         rcu_read_unlock();
1726         if (len)
1727                 len--;  /* no leading '/' */
1728
1729         path = kmalloc(len+1, GFP_NOFS);
1730         if (path == NULL)
1731                 return ERR_PTR(-ENOMEM);
1732         pos = len;
1733         path[pos] = 0;  /* trailing null */
1734         rcu_read_lock();
1735         for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
1736                 struct inode *inode;
1737
1738                 spin_lock(&temp->d_lock);
1739                 inode = d_inode(temp);
1740                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
1741                         dout("build_path path+%d: %p SNAPDIR\n",
1742                              pos, temp);
1743                 } else if (stop_on_nosnap && inode &&
1744                            ceph_snap(inode) == CEPH_NOSNAP) {
1745                         spin_unlock(&temp->d_lock);
1746                         break;
1747                 } else {
1748                         pos -= temp->d_name.len;
1749                         if (pos < 0) {
1750                                 spin_unlock(&temp->d_lock);
1751                                 break;
1752                         }
1753                         strncpy(path + pos, temp->d_name.name,
1754                                 temp->d_name.len);
1755                 }
1756                 spin_unlock(&temp->d_lock);
1757                 if (pos)
1758                         path[--pos] = '/';
1759                 temp = temp->d_parent;
1760         }
1761         rcu_read_unlock();
1762         if (pos != 0 || read_seqretry(&rename_lock, seq)) {
1763                 pr_err("build_path did not end path lookup where "
1764                        "expected, namelen is %d, pos is %d\n", len, pos);
1765                 /* presumably this is only possible if racing with a
1766                    rename of one of the parent directories (we cannot
1767                    lock the dentries above us to prevent this, but
1768                    retrying should be harmless) */
1769                 kfree(path);
1770                 goto retry;
1771         }
1772
1773         *base = ceph_ino(d_inode(temp));
1774         *plen = len;
1775         dout("build_path on %p %d built %llx '%.*s'\n",
1776              dentry, d_count(dentry), *base, len, path);
1777         return path;
1778 }
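
/*
 * Example of the encoding described above: for a dentry at
 * foo/.snap/bar, the built path is "foo//bar" (the snapdir becomes an
 * empty component), and *base is the ino of the ancestor the walk
 * stopped at -- the root, or the first non-snapped inode when
 * stop_on_nosnap is set.
 */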
1779
1780 static int build_dentry_path(struct dentry *dentry,
1781                              const char **ppath, int *ppathlen, u64 *pino,
1782                              int *pfreepath)
1783 {
1784         char *path;
1785
1786         if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
1787                 *pino = ceph_ino(d_inode(dentry->d_parent));
1788                 *ppath = dentry->d_name.name;
1789                 *ppathlen = dentry->d_name.len;
1790                 return 0;
1791         }
1792         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1793         if (IS_ERR(path))
1794                 return PTR_ERR(path);
1795         *ppath = path;
1796         *pfreepath = 1;
1797         return 0;
1798 }
1799
1800 static int build_inode_path(struct inode *inode,
1801                             const char **ppath, int *ppathlen, u64 *pino,
1802                             int *pfreepath)
1803 {
1804         struct dentry *dentry;
1805         char *path;
1806
1807         if (ceph_snap(inode) == CEPH_NOSNAP) {
1808                 *pino = ceph_ino(inode);
1809                 *ppathlen = 0;
1810                 return 0;
1811         }
1812         dentry = d_find_alias(inode);
1813         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1814         dput(dentry);
1815         if (IS_ERR(path))
1816                 return PTR_ERR(path);
1817         *ppath = path;
1818         *pfreepath = 1;
1819         return 0;
1820 }
1821
1822 /*
1823  * request arguments may be specified via an inode *, a dentry *, or
1824  * an explicit ino+path.
1825  */
1826 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1827                                   const char *rpath, u64 rino,
1828                                   const char **ppath, int *pathlen,
1829                                   u64 *ino, int *freepath)
1830 {
1831         int r = 0;
1832
1833         if (rinode) {
1834                 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
1835                 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
1836                      ceph_snap(rinode));
1837         } else if (rdentry) {
1838                 r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
1839                 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1840                      *ppath);
1841         } else if (rpath || rino) {
1842                 *ino = rino;
1843                 *ppath = rpath;
1844                 *pathlen = rpath ? strlen(rpath) : 0;
1845                 dout(" path %.*s\n", *pathlen, rpath);
1846         }
1847
1848         return r;
1849 }
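
/*
 * Precedence note for the helper above: an explicit rinode wins over
 * rdentry, which wins over a raw rpath/rino pair.  Whichever branch
 * runs, the result is normalized to the same (ino, path) form that
 * create_request_message() below encodes with ceph_encode_filepath().
 */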
1850
1851 /*
1852  * called under mdsc->mutex
1853  */
1854 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1855                                                struct ceph_mds_request *req,
1856                                                int mds, bool drop_cap_releases)
1857 {
1858         struct ceph_msg *msg;
1859         struct ceph_mds_request_head *head;
1860         const char *path1 = NULL;
1861         const char *path2 = NULL;
1862         u64 ino1 = 0, ino2 = 0;
1863         int pathlen1 = 0, pathlen2 = 0;
1864         int freepath1 = 0, freepath2 = 0;
1865         int len;
1866         u16 releases;
1867         void *p, *end;
1868         int ret;
1869
1870         ret = set_request_path_attr(req->r_inode, req->r_dentry,
1871                               req->r_path1, req->r_ino1.ino,
1872                               &path1, &pathlen1, &ino1, &freepath1);
1873         if (ret < 0) {
1874                 msg = ERR_PTR(ret);
1875                 goto out;
1876         }
1877
1878         ret = set_request_path_attr(NULL, req->r_old_dentry,
1879                               req->r_path2, req->r_ino2.ino,
1880                               &path2, &pathlen2, &ino2, &freepath2);
1881         if (ret < 0) {
1882                 msg = ERR_PTR(ret);
1883                 goto out_free1;
1884         }
1885
1886         len = sizeof(*head) +
1887                 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
1888                 sizeof(struct ceph_timespec);
1889
1890         /* calculate (max) length for cap releases */
1891         len += sizeof(struct ceph_mds_request_release) *
1892                 (!!req->r_inode_drop + !!req->r_dentry_drop +
1893                  !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
1894         if (req->r_dentry_drop)
1895                 len += req->r_dentry->d_name.len;
1896         if (req->r_old_dentry_drop)
1897                 len += req->r_old_dentry->d_name.len;
1898
1899         msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
1900         if (!msg) {
1901                 msg = ERR_PTR(-ENOMEM);
1902                 goto out_free2;
1903         }
1904
1905         msg->hdr.version = cpu_to_le16(2);
1906         msg->hdr.tid = cpu_to_le64(req->r_tid);
1907
1908         head = msg->front.iov_base;
1909         p = msg->front.iov_base + sizeof(*head);
1910         end = msg->front.iov_base + msg->front.iov_len;
1911
1912         head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
1913         head->op = cpu_to_le32(req->r_op);
1914         head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
1915         head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
1916         head->args = req->r_args;
1917
1918         ceph_encode_filepath(&p, end, ino1, path1);
1919         ceph_encode_filepath(&p, end, ino2, path2);
1920
1921         /* make note of release offset, in case we need to replay */
1922         req->r_request_release_offset = p - msg->front.iov_base;
1923
1924         /* cap releases */
1925         releases = 0;
1926         if (req->r_inode_drop)
1927                 releases += ceph_encode_inode_release(&p,
1928                       req->r_inode ? req->r_inode : d_inode(req->r_dentry),
1929                       mds, req->r_inode_drop, req->r_inode_unless, 0);
1930         if (req->r_dentry_drop)
1931                 releases += ceph_encode_dentry_release(&p, req->r_dentry,
1932                        mds, req->r_dentry_drop, req->r_dentry_unless);
1933         if (req->r_old_dentry_drop)
1934                 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
1935                        mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
1936         if (req->r_old_inode_drop)
1937                 releases += ceph_encode_inode_release(&p,
1938                       d_inode(req->r_old_dentry),
1939                       mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
1940
1941         if (drop_cap_releases) {
1942                 releases = 0;
1943                 p = msg->front.iov_base + req->r_request_release_offset;
1944         }
1945
1946         head->num_releases = cpu_to_le16(releases);
1947
1948         /* time stamp */
1949         {
1950                 struct ceph_timespec ts;
1951                 ceph_encode_timespec(&ts, &req->r_stamp);
1952                 ceph_encode_copy(&p, &ts, sizeof(ts));
1953         }
1954
1955         BUG_ON(p > end);
1956         msg->front.iov_len = p - msg->front.iov_base;
1957         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1958
1959         if (req->r_pagelist) {
1960                 struct ceph_pagelist *pagelist = req->r_pagelist;
1961                 atomic_inc(&pagelist->refcnt);
1962                 ceph_msg_data_add_pagelist(msg, pagelist);
1963                 msg->hdr.data_len = cpu_to_le32(pagelist->length);
1964         } else {
1965                 msg->hdr.data_len = 0;
1966         }
1967
1968         msg->hdr.data_off = cpu_to_le16(0);
1969
1970 out_free2:
1971         if (freepath2)
1972                 kfree((char *)path2);
1973 out_free1:
1974         if (freepath1)
1975                 kfree((char *)path1);
1976 out:
1977         return msg;
1978 }
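
/*
 * Front-section layout produced above (header version 2):
 *
 *   struct ceph_mds_request_head head;
 *   filepath  (ino1 + path1)         // ceph_encode_filepath()
 *   filepath  (ino2 + path2)
 *   release blobs[num_releases]      // starts at r_request_release_offset
 *   struct ceph_timespec stamp;
 *
 * Keeping r_request_release_offset lets a later replay rewind to that
 * point, drop the releases and re-encode only the timestamp.
 */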
1979
1980 /*
1981  * called under mdsc->mutex if error, under no mutex if
1982  * success.
1983  */
1984 static void complete_request(struct ceph_mds_client *mdsc,
1985                              struct ceph_mds_request *req)
1986 {
1987         if (req->r_callback)
1988                 req->r_callback(mdsc, req);
1989         else
1990                 complete_all(&req->r_completion);
1991 }
1992
1993 /*
1994  * called under mdsc->mutex
1995  */
1996 static int __prepare_send_request(struct ceph_mds_client *mdsc,
1997                                   struct ceph_mds_request *req,
1998                                   int mds, bool drop_cap_releases)
1999 {
2000         struct ceph_mds_request_head *rhead;
2001         struct ceph_msg *msg;
2002         int flags = 0;
2003
2004         req->r_attempts++;
2005         if (req->r_inode) {
2006                 struct ceph_cap *cap =
2007                         ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2008
2009                 if (cap)
2010                         req->r_sent_on_mseq = cap->mseq;
2011                 else
2012                         req->r_sent_on_mseq = -1;
2013         }
2014         dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2015              req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2016
2017         if (req->r_got_unsafe) {
2018                 void *p;
2019                 /*
2020                  * Replay.  Do not regenerate message (and rebuild
2021                  * paths, etc.); just use the original message.
2022                  * Rebuilding paths will break for renames because
2023                  * d_move mangles the src name.
2024                  */
2025                 msg = req->r_request;
2026                 rhead = msg->front.iov_base;
2027
2028                 flags = le32_to_cpu(rhead->flags);
2029                 flags |= CEPH_MDS_FLAG_REPLAY;
2030                 rhead->flags = cpu_to_le32(flags);
2031
2032                 if (req->r_target_inode)
2033                         rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2034
2035                 rhead->num_retry = req->r_attempts - 1;
2036
2037                 /* remove cap/dentry releases from message */
2038                 rhead->num_releases = 0;
2039
2040                 /* time stamp */
2041                 p = msg->front.iov_base + req->r_request_release_offset;
2042                 {
2043                         struct ceph_timespec ts;
2044                         ceph_encode_timespec(&ts, &req->r_stamp);
2045                         ceph_encode_copy(&p, &ts, sizeof(ts));
2046                 }
2047
2048                 msg->front.iov_len = p - msg->front.iov_base;
2049                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2050                 return 0;
2051         }
2052
2053         if (req->r_request) {
2054                 ceph_msg_put(req->r_request);
2055                 req->r_request = NULL;
2056         }
2057         msg = create_request_message(mdsc, req, mds, drop_cap_releases);
2058         if (IS_ERR(msg)) {
2059                 req->r_err = PTR_ERR(msg);
2060                 return PTR_ERR(msg);
2061         }
2062         req->r_request = msg;
2063
2064         rhead = msg->front.iov_base;
2065         rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2066         if (req->r_got_unsafe)
2067                 flags |= CEPH_MDS_FLAG_REPLAY;
2068         if (req->r_locked_dir)
2069                 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2070         rhead->flags = cpu_to_le32(flags);
2071         rhead->num_fwd = req->r_num_fwd;
2072         rhead->num_retry = req->r_attempts - 1;
2073         rhead->ino = 0;
2074
2075         dout(" r_locked_dir = %p\n", req->r_locked_dir);
2076         return 0;
2077 }
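
/*
 * Subtlety in the replay path above: p is reset to
 * r_request_release_offset and the timestamp is re-encoded there,
 * which is safe precisely because rhead->num_releases was just zeroed
 * -- the release blobs that used to sit at that offset are truncated
 * out of the message when iov_len is recomputed.
 */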
2078
2079 /*
2080  * send request, or put it on the appropriate wait list.
2081  */
2082 static int __do_request(struct ceph_mds_client *mdsc,
2083                         struct ceph_mds_request *req)
2084 {
2085         struct ceph_mds_session *session = NULL;
2086         int mds = -1;
2087         int err = 0;
2088
2089         if (req->r_err || req->r_got_result) {
2090                 if (req->r_aborted)
2091                         __unregister_request(mdsc, req);
2092                 goto out;
2093         }
2094
2095         if (req->r_timeout &&
2096             time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2097                 dout("do_request timed out\n");
2098                 err = -EIO;
2099                 goto finish;
2100         }
2101         if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2102                 dout("do_request forced umount\n");
2103                 err = -EIO;
2104                 goto finish;
2105         }
2106         if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
2107                 if (mdsc->mdsmap_err) {
2108                         err = mdsc->mdsmap_err;
2109                         dout("do_request mdsmap err %d\n", err);
2110                         goto finish;
2111                 }
2112                 if (mdsc->mdsmap->m_epoch == 0) {
2113                         dout("do_request no mdsmap, waiting for map\n");
2114                         list_add(&req->r_wait, &mdsc->waiting_for_map);
2115                         goto finish;
2116                 }
2117                 if (!(mdsc->fsc->mount_options->flags &
2118                       CEPH_MOUNT_OPT_MOUNTWAIT) &&
2119                     !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
2120                         err = -ENOENT;
2121                         pr_info("probably no mds server is up\n");
2122                         goto finish;
2123                 }
2124         }
2125
2126         put_request_session(req);
2127
2128         mds = __choose_mds(mdsc, req);
2129         if (mds < 0 ||
2130             ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2131                 dout("do_request no mds or not active, waiting for map\n");
2132                 list_add(&req->r_wait, &mdsc->waiting_for_map);
2133                 goto out;
2134         }
2135
2136         /* get, open session */
2137         session = __ceph_lookup_mds_session(mdsc, mds);
2138         if (!session) {
2139                 session = register_session(mdsc, mds);
2140                 if (IS_ERR(session)) {
2141                         err = PTR_ERR(session);
2142                         goto finish;
2143                 }
2144         }
2145         req->r_session = get_session(session);
2146
2147         dout("do_request mds%d session %p state %s\n", mds, session,
2148              ceph_session_state_name(session->s_state));
2149         if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2150             session->s_state != CEPH_MDS_SESSION_HUNG) {
2151                 if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
2152                         err = -EACCES;
2153                         goto out_session;
2154                 }
2155                 if (session->s_state == CEPH_MDS_SESSION_NEW ||
2156                     session->s_state == CEPH_MDS_SESSION_CLOSING)
2157                         __open_session(mdsc, session);
2158                 list_add(&req->r_wait, &session->s_waiting);
2159                 goto out_session;
2160         }
2161
2162         /* send request */
2163         req->r_resend_mds = -1;   /* forget any previous mds hint */
2164
2165         if (req->r_request_started == 0)   /* note request start time */
2166                 req->r_request_started = jiffies;
2167
2168         err = __prepare_send_request(mdsc, req, mds, false);
2169         if (!err) {
2170                 ceph_msg_get(req->r_request);
2171                 ceph_con_send(&session->s_con, req->r_request);
2172         }
2173
2174 out_session:
2175         ceph_put_mds_session(session);
2176 finish:
2177         if (err) {
2178                 dout("__do_request early error %d\n", err);
2179                 req->r_err = err;
2180                 complete_request(mdsc, req);
2181                 __unregister_request(mdsc, req);
2182         }
2183 out:
2184         return err;
2185 }
2186
2187 /*
2188  * called under mdsc->mutex
2189  */
2190 static void __wake_requests(struct ceph_mds_client *mdsc,
2191                             struct list_head *head)
2192 {
2193         struct ceph_mds_request *req;
2194         LIST_HEAD(tmp_list);
2195
2196         list_splice_init(head, &tmp_list);
2197
2198         while (!list_empty(&tmp_list)) {
2199                 req = list_entry(tmp_list.next,
2200                                  struct ceph_mds_request, r_wait);
2201                 list_del_init(&req->r_wait);
2202                 dout(" wake request %p tid %llu\n", req, req->r_tid);
2203                 __do_request(mdsc, req);
2204         }
2205 }
2206
2207 /*
2208  * Wake up threads with requests pending for @mds, so that they can
2209  * resubmit their requests to a possibly different mds.
2210  */
2211 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2212 {
2213         struct ceph_mds_request *req;
2214         struct rb_node *p = rb_first(&mdsc->request_tree);
2215
2216         dout("kick_requests mds%d\n", mds);
2217         while (p) {
2218                 req = rb_entry(p, struct ceph_mds_request, r_node);
2219                 p = rb_next(p);
2220                 if (req->r_got_unsafe)
2221                         continue;
2222                 if (req->r_attempts > 0)
2223                         continue; /* only new requests */
2224                 if (req->r_session &&
2225                     req->r_session->s_mds == mds) {
2226                         dout(" kicking tid %llu\n", req->r_tid);
2227                         list_del_init(&req->r_wait);
2228                         __do_request(mdsc, req);
2229                 }
2230         }
2231 }
2232
2233 void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
2234                               struct ceph_mds_request *req)
2235 {
2236         dout("submit_request on %p\n", req);
2237         mutex_lock(&mdsc->mutex);
2238         __register_request(mdsc, req, NULL);
2239         __do_request(mdsc, req);
2240         mutex_unlock(&mdsc->mutex);
2241 }
2242
2243 /*
2244  * Synchronously perform an mds request.  Take care of all of the
2245  * session setup, forwarding, and retry details.
2246  */
2247 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
2248                          struct inode *dir,
2249                          struct ceph_mds_request *req)
2250 {
2251         int err;
2252
2253         dout("do_request on %p\n", req);
2254
2255         /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
2256         if (req->r_inode)
2257                 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
2258         if (req->r_locked_dir)
2259                 ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
2260         if (req->r_old_dentry_dir)
2261                 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
2262                                   CEPH_CAP_PIN);
2263
2264         /* issue */
2265         mutex_lock(&mdsc->mutex);
2266         __register_request(mdsc, req, dir);
2267         __do_request(mdsc, req);
2268
2269         if (req->r_err) {
2270                 err = req->r_err;
2271                 goto out;
2272         }
2273
2274         /* wait */
2275         mutex_unlock(&mdsc->mutex);
2276         dout("do_request waiting\n");
2277         if (!req->r_timeout && req->r_wait_for_completion) {
2278                 err = req->r_wait_for_completion(mdsc, req);
2279         } else {
2280                 long timeleft = wait_for_completion_killable_timeout(
2281                                         &req->r_completion,
2282                                         ceph_timeout_jiffies(req->r_timeout));
2283                 if (timeleft > 0)
2284                         err = 0;
2285                 else if (!timeleft)
2286                         err = -EIO;  /* timed out */
2287                 else
2288                         err = timeleft;  /* killed */
2289         }
2290         dout("do_request waited, got %d\n", err);
2291         mutex_lock(&mdsc->mutex);
2292
2293         /* only abort if we didn't race with a real reply */
2294         if (req->r_got_result) {
2295                 err = le32_to_cpu(req->r_reply_info.head->result);
2296         } else if (err < 0) {
2297                 dout("aborted request %lld with %d\n", req->r_tid, err);
2298
2299                 /*
2300                  * ensure we aren't running concurrently with
2301                  * ceph_fill_trace or ceph_readdir_prepopulate, which
2302                  * rely on locks (dir mutex) held by our caller.
2303                  */
2304                 mutex_lock(&req->r_fill_mutex);
2305                 req->r_err = err;
2306                 req->r_aborted = true;
2307                 mutex_unlock(&req->r_fill_mutex);
2308
2309                 if (req->r_locked_dir &&
2310                     (req->r_op & CEPH_MDS_OP_WRITE))
2311                         ceph_invalidate_dir_request(req);
2312         } else {
2313                 err = req->r_err;
2314         }
2315
2316 out:
2317         mutex_unlock(&mdsc->mutex);
2318         dout("do_request %p done, result %d\n", req, err);
2319         return err;
2320 }
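
/*
 * Return-value summary for ceph_mdsc_do_request(), as implemented
 * above: on a real reply, the MDS result code from
 * r_reply_info.head->result; on abort (signal or timeout), the local
 * errno, with r_fill_mutex taken so a late reply cannot race
 * ceph_fill_trace(); otherwise whatever early error landed in r_err.
 */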
2321
2322 /*
2323  * Invalidate dir's completeness, dentry lease state on an aborted MDS
2324  * namespace request.
2325  */
2326 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
2327 {
2328         struct inode *inode = req->r_locked_dir;
2329
2330         dout("invalidate_dir_request %p (complete, lease(s))\n", inode);
2331
2332         ceph_dir_clear_complete(inode);
2333         if (req->r_dentry)
2334                 ceph_invalidate_dentry_lease(req->r_dentry);
2335         if (req->r_old_dentry)
2336                 ceph_invalidate_dentry_lease(req->r_old_dentry);
2337 }
2338
2339 /*
2340  * Handle mds reply.
2341  *
2342  * We take the session mutex and parse and process the reply immediately.
2343  * This preserves the logical ordering of replies, capabilities, etc., sent
2344  * by the MDS as they are applied to our local cache.
2345  */
2346 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2347 {
2348         struct ceph_mds_client *mdsc = session->s_mdsc;
2349         struct ceph_mds_request *req;
2350         struct ceph_mds_reply_head *head = msg->front.iov_base;
2351         struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
2352         struct ceph_snap_realm *realm;
2353         u64 tid;
2354         int err, result;
2355         int mds = session->s_mds;
2356
2357         if (msg->front.iov_len < sizeof(*head)) {
2358                 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
2359                 ceph_msg_dump(msg);
2360                 return;
2361         }
2362
2363         /* get request, session */
2364         tid = le64_to_cpu(msg->hdr.tid);
2365         mutex_lock(&mdsc->mutex);
2366         req = lookup_get_request(mdsc, tid);
2367         if (!req) {
2368                 dout("handle_reply on unknown tid %llu\n", tid);
2369                 mutex_unlock(&mdsc->mutex);
2370                 return;
2371         }
2372         dout("handle_reply %p\n", req);
2373
2374         /* correct session? */
2375         if (req->r_session != session) {
2376                 pr_err("mdsc_handle_reply got %llu on session mds%d"
2377                        " not mds%d\n", tid, session->s_mds,
2378                        req->r_session ? req->r_session->s_mds : -1);
2379                 mutex_unlock(&mdsc->mutex);
2380                 goto out;
2381         }
2382
2383         /* dup? */
2384         if ((req->r_got_unsafe && !head->safe) ||
2385             (req->r_got_safe && head->safe)) {
2386                 pr_warn("got a dup %s reply on %llu from mds%d\n",
2387                            head->safe ? "safe" : "unsafe", tid, mds);
2388                 mutex_unlock(&mdsc->mutex);
2389                 goto out;
2390         }
2391         if (req->r_got_safe) {
2392                 pr_warn("got unsafe after safe on %llu from mds%d\n",
2393                            tid, mds);
2394                 mutex_unlock(&mdsc->mutex);
2395                 goto out;
2396         }
2397
2398         result = le32_to_cpu(head->result);
2399
2400         /*
2401          * Handle an ESTALE:
2402          * if we're not talking to the authority, send to it;
2403          * if the authority has changed while we weren't looking,
2404          * send to the new authority.
2405          * Otherwise we just have to return an ESTALE.
2406          */
2407         if (result == -ESTALE) {
2408                 dout("got ESTALE on request %llu\n", req->r_tid);
2409                 req->r_resend_mds = -1;
2410                 if (req->r_direct_mode != USE_AUTH_MDS) {
2411                         dout("not using auth, setting for that now\n");
2412                         req->r_direct_mode = USE_AUTH_MDS;
2413                         __do_request(mdsc, req);
2414                         mutex_unlock(&mdsc->mutex);
2415                         goto out;
2416                 } else  {
2417                         int mds = __choose_mds(mdsc, req);
2418                         if (mds >= 0 && mds != req->r_session->s_mds) {
2419                                 dout("but auth changed, so resending\n");
2420                                 __do_request(mdsc, req);
2421                                 mutex_unlock(&mdsc->mutex);
2422                                 goto out;
2423                         }
2424                 }
2425                 dout("have to return ESTALE on request %llu\n", req->r_tid);
2426         }
2427
2428
2429         if (head->safe) {
2430                 req->r_got_safe = true;
2431                 __unregister_request(mdsc, req);
2432
2433                 if (req->r_got_unsafe) {
2434                         /*
2435                          * We already handled the unsafe response, now do the
2436                          * cleanup.  No need to examine the response; the MDS
2437                          * doesn't include any result info in the safe
2438                          * response.  And even if it did, there is nothing
2439                          * useful we could do with a revised return value.
2440                          */
2441                         dout("got safe reply %llu, mds%d\n", tid, mds);
2442
2443                         /* last unsafe request during umount? */
2444                         if (mdsc->stopping && !__get_oldest_req(mdsc))
2445                                 complete_all(&mdsc->safe_umount_waiters);
2446                         mutex_unlock(&mdsc->mutex);
2447                         goto out;
2448                 }
2449         } else {
2450                 req->r_got_unsafe = true;
2451                 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
2452                 if (req->r_unsafe_dir) {
2453                         struct ceph_inode_info *ci =
2454                                         ceph_inode(req->r_unsafe_dir);
2455                         spin_lock(&ci->i_unsafe_lock);
2456                         list_add_tail(&req->r_unsafe_dir_item,
2457                                       &ci->i_unsafe_dirops);
2458                         spin_unlock(&ci->i_unsafe_lock);
2459                 }
2460         }
2461
2462         dout("handle_reply tid %lld result %d\n", tid, result);
2463         rinfo = &req->r_reply_info;
2464         err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
2465         mutex_unlock(&mdsc->mutex);
2466
2467         mutex_lock(&session->s_mutex);
2468         if (err < 0) {
2469                 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
2470                 ceph_msg_dump(msg);
2471                 goto out_err;
2472         }
2473
2474         /* snap trace */
2475         realm = NULL;
2476         if (rinfo->snapblob_len) {
2477                 down_write(&mdsc->snap_rwsem);
2478                 ceph_update_snap_trace(mdsc, rinfo->snapblob,
2479                                 rinfo->snapblob + rinfo->snapblob_len,
2480                                 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
2481                                 &realm);
2482                 downgrade_write(&mdsc->snap_rwsem);
2483         } else {
2484                 down_read(&mdsc->snap_rwsem);
2485         }
2486
2487         /* insert trace into our cache */
2488         mutex_lock(&req->r_fill_mutex);
2489         current->journal_info = req;
2490         err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
2491         if (err == 0) {
2492                 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
2493                                     req->r_op == CEPH_MDS_OP_LSSNAP))
2494                         ceph_readdir_prepopulate(req, req->r_session);
2495                 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
2496         }
2497         current->journal_info = NULL;
2498         mutex_unlock(&req->r_fill_mutex);
2499
2500         up_read(&mdsc->snap_rwsem);
2501         if (realm)
2502                 ceph_put_snap_realm(mdsc, realm);
2503
2504         if (err == 0 && req->r_got_unsafe && req->r_target_inode) {
2505                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
2506                 spin_lock(&ci->i_unsafe_lock);
2507                 list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops);
2508                 spin_unlock(&ci->i_unsafe_lock);
2509         }
2510 out_err:
2511         mutex_lock(&mdsc->mutex);
2512         if (!req->r_aborted) {
2513                 if (err) {
2514                         req->r_err = err;
2515                 } else {
2516                         req->r_reply = ceph_msg_get(msg);
2517                         req->r_got_result = true;
2518                 }
2519         } else {
2520                 dout("reply arrived after request %lld was aborted\n", tid);
2521         }
2522         mutex_unlock(&mdsc->mutex);
2523
2524         mutex_unlock(&session->s_mutex);
2525
2526         /* kick calling process */
2527         complete_request(mdsc, req);
2528 out:
2529         ceph_mdsc_put_request(req);
2530         return;
2531 }
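
/*
 * Safe/unsafe reply ordering, as enforced by handle_reply() above:
 *
 *   unsafe reply  ->  r_got_unsafe set, request stays registered and
 *                     is queued on s_unsafe (and i_unsafe_dirops /
 *                     i_unsafe_iops) for possible replay;
 *   safe reply    ->  request unregistered; if the unsafe reply was
 *                     already processed, the safe one is pure cleanup
 *                     and carries no new result info.
 */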
2532
2533
2534
2535 /*
2536  * handle mds notification that our request has been forwarded.
2537  */
2538 static void handle_forward(struct ceph_mds_client *mdsc,
2539                            struct ceph_mds_session *session,
2540                            struct ceph_msg *msg)
2541 {
2542         struct ceph_mds_request *req;
2543         u64 tid = le64_to_cpu(msg->hdr.tid);
2544         u32 next_mds;
2545         u32 fwd_seq;
2546         int err = -EINVAL;
2547         void *p = msg->front.iov_base;
2548         void *end = p + msg->front.iov_len;
2549
2550         ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2551         next_mds = ceph_decode_32(&p);
2552         fwd_seq = ceph_decode_32(&p);
2553
2554         mutex_lock(&mdsc->mutex);
2555         req = lookup_get_request(mdsc, tid);
2556         if (!req) {
2557                 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
2558                 goto out;  /* dup reply? */
2559         }
2560
2561         if (req->r_aborted) {
2562                 dout("forward tid %llu aborted, unregistering\n", tid);
2563                 __unregister_request(mdsc, req);
2564         } else if (fwd_seq <= req->r_num_fwd) {
2565                 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
2566                      tid, next_mds, req->r_num_fwd, fwd_seq);
2567         } else {
2568                 /* resend. forward race not possible; mds would drop */
2569                 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
2570                 BUG_ON(req->r_err);
2571                 BUG_ON(req->r_got_result);
2572                 req->r_attempts = 0;
2573                 req->r_num_fwd = fwd_seq;
2574                 req->r_resend_mds = next_mds;
2575                 put_request_session(req);
2576                 __do_request(mdsc, req);
2577         }
2578         ceph_mdsc_put_request(req);
2579 out:
2580         mutex_unlock(&mdsc->mutex);
2581         return;
2582
2583 bad:
2584         pr_err("mdsc_handle_forward decode error err=%d\n", err);
2585 }
2586
2587 /*
2588  * handle a mds session control message
2589  */
2590 static void handle_session(struct ceph_mds_session *session,
2591                            struct ceph_msg *msg)
2592 {
2593         struct ceph_mds_client *mdsc = session->s_mdsc;
2594         u32 op;
2595         u64 seq;
2596         int mds = session->s_mds;
2597         struct ceph_mds_session_head *h = msg->front.iov_base;
2598         int wake = 0;
2599
2600         /* decode */
2601         if (msg->front.iov_len != sizeof(*h))
2602                 goto bad;
2603         op = le32_to_cpu(h->op);
2604         seq = le64_to_cpu(h->seq);
2605
2606         mutex_lock(&mdsc->mutex);
2607         if (op == CEPH_SESSION_CLOSE)
2608                 __unregister_session(mdsc, session);
2609         /* FIXME: this ttl calculation is generous */
2610         session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
2611         mutex_unlock(&mdsc->mutex);
2612
2613         mutex_lock(&session->s_mutex);
2614
2615         dout("handle_session mds%d %s %p state %s seq %llu\n",
2616              mds, ceph_session_op_name(op), session,
2617              ceph_session_state_name(session->s_state), seq);
2618
2619         if (session->s_state == CEPH_MDS_SESSION_HUNG) {
2620                 session->s_state = CEPH_MDS_SESSION_OPEN;
2621                 pr_info("mds%d came back\n", session->s_mds);
2622         }
2623
2624         switch (op) {
2625         case CEPH_SESSION_OPEN:
2626                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2627                         pr_info("mds%d reconnect success\n", session->s_mds);
2628                 session->s_state = CEPH_MDS_SESSION_OPEN;
2629                 renewed_caps(mdsc, session, 0);
2630                 wake = 1;
2631                 if (mdsc->stopping)
2632                         __close_session(mdsc, session);
2633                 break;
2634
2635         case CEPH_SESSION_RENEWCAPS:
2636                 if (session->s_renew_seq == seq)
2637                         renewed_caps(mdsc, session, 1);
2638                 break;
2639
2640         case CEPH_SESSION_CLOSE:
2641                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2642                         pr_info("mds%d reconnect denied\n", session->s_mds);
2643                 cleanup_session_requests(mdsc, session);
2644                 remove_session_caps(session);
2645                 wake = 2; /* for good measure */
2646                 wake_up_all(&mdsc->session_close_wq);
2647                 break;
2648
2649         case CEPH_SESSION_STALE:
2650                 pr_info("mds%d caps went stale, renewing\n",
2651                         session->s_mds);
2652                 spin_lock(&session->s_gen_ttl_lock);
2653                 session->s_cap_gen++;
2654                 session->s_cap_ttl = jiffies - 1;
2655                 spin_unlock(&session->s_gen_ttl_lock);
2656                 send_renew_caps(mdsc, session);
2657                 break;
2658
2659         case CEPH_SESSION_RECALL_STATE:
2660                 trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
2661                 break;
2662
2663         case CEPH_SESSION_FLUSHMSG:
2664                 send_flushmsg_ack(mdsc, session, seq);
2665                 break;
2666
2667         case CEPH_SESSION_FORCE_RO:
2668                 dout("force_session_readonly %p\n", session);
2669                 spin_lock(&session->s_cap_lock);
2670                 session->s_readonly = true;
2671                 spin_unlock(&session->s_cap_lock);
2672                 wake_up_session_caps(session, 0);
2673                 break;
2674
2675         case CEPH_SESSION_REJECT:
2676                 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
2677                 pr_info("mds%d rejected session\n", session->s_mds);
2678                 session->s_state = CEPH_MDS_SESSION_REJECTED;
2679                 cleanup_session_requests(mdsc, session);
2680                 remove_session_caps(session);
2681                 wake = 2; /* for good measure */
2682                 break;
2683
2684         default:
2685                 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
2686                 WARN_ON(1);
2687         }
2688
2689         mutex_unlock(&session->s_mutex);
2690         if (wake) {
2691                 mutex_lock(&mdsc->mutex);
2692                 __wake_requests(mdsc, &session->s_waiting);
2693                 if (wake == 2)
2694                         kick_requests(mdsc, mds);
2695                 mutex_unlock(&mdsc->mutex);
2696         }
2697         return;
2698
2699 bad:
2700         pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
2701                (int)msg->front.iov_len);
2702         ceph_msg_dump(msg);
2703         return;
2704 }
2705
2706
2707 /*
2708  * called under session->mutex.
2709  */
2710 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
2711                                    struct ceph_mds_session *session)
2712 {
2713         struct ceph_mds_request *req, *nreq;
2714         struct rb_node *p;
2715         int err;
2716
2717         dout("replay_unsafe_requests mds%d\n", session->s_mds);
2718
2719         mutex_lock(&mdsc->mutex);
2720         list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
2721                 err = __prepare_send_request(mdsc, req, session->s_mds, true);
2722                 if (!err) {
2723                         ceph_msg_get(req->r_request);
2724                         ceph_con_send(&session->s_con, req->r_request);
2725                 }
2726         }
2727
2728         /*
2729          * Also re-send old requests when the MDS enters the reconnect stage,
2730          * so that the MDS can process completed requests in the clientreplay stage.
2731          */
2732         p = rb_first(&mdsc->request_tree);
2733         while (p) {
2734                 req = rb_entry(p, struct ceph_mds_request, r_node);
2735                 p = rb_next(p);
2736                 if (req->r_got_unsafe)
2737                         continue;
2738                 if (req->r_attempts == 0)
2739                         continue; /* only old requests */
2740                 if (req->r_session &&
2741                     req->r_session->s_mds == session->s_mds) {
2742                         err = __prepare_send_request(mdsc, req,
2743                                                      session->s_mds, true);
2744                         if (!err) {
2745                                 ceph_msg_get(req->r_request);
2746                                 ceph_con_send(&session->s_con, req->r_request);
2747                         }
2748                 }
2749         }
2750         mutex_unlock(&mdsc->mutex);
2751 }
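
/*
 * Both resend paths above pass drop_cap_releases=true to
 * __prepare_send_request(); presumably because reconnect rebuilds cap
 * state from scratch, releases encoded into a regenerated request
 * could refer to caps the recovering MDS no longer tracks.
 */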
2752
2753 /*
2754  * Encode information about a cap for a reconnect with the MDS.
2755  */
2756 static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2757                           void *arg)
2758 {
2759         union {
2760                 struct ceph_mds_cap_reconnect v2;
2761                 struct ceph_mds_cap_reconnect_v1 v1;
2762         } rec;
2763         struct ceph_inode_info *ci;
2764         struct ceph_reconnect_state *recon_state = arg;
2765         struct ceph_pagelist *pagelist = recon_state->pagelist;
2766         char *path;
2767         int pathlen, err;
2768         u64 pathbase;
2769         u64 snap_follows;
2770         struct dentry *dentry;
2771
2772         ci = cap->ci;
2773
2774         dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
2775              inode, ceph_vinop(inode), cap, cap->cap_id,
2776              ceph_cap_string(cap->issued));
2777         err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
2778         if (err)
2779                 return err;
2780
2781         dentry = d_find_alias(inode);
2782         if (dentry) {
2783                 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
2784                 if (IS_ERR(path)) {
2785                         err = PTR_ERR(path);
2786                         goto out_dput;
2787                 }
2788         } else {
2789                 path = NULL;
2790                 pathlen = 0;
2791                 pathbase = 0;
2792         }
2793
2794         spin_lock(&ci->i_ceph_lock);
2795         cap->seq = 0;        /* reset cap seq */
2796         cap->issue_seq = 0;  /* and issue_seq */
2797         cap->mseq = 0;       /* and migrate_seq */
2798         cap->cap_gen = cap->session->s_cap_gen;
2799
2800         if (recon_state->msg_version >= 2) {
2801                 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
2802                 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2803                 rec.v2.issued = cpu_to_le32(cap->issued);
2804                 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2805                 rec.v2.pathbase = cpu_to_le64(pathbase);
2806                 rec.v2.flock_len = 0;
2807         } else {
2808                 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
2809                 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2810                 rec.v1.issued = cpu_to_le32(cap->issued);
2811                 rec.v1.size = cpu_to_le64(inode->i_size);
2812                 ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
2813                 ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
2814                 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2815                 rec.v1.pathbase = cpu_to_le64(pathbase);
2816         }
2817
2818         if (list_empty(&ci->i_cap_snaps)) {
2819                 snap_follows = 0;
2820         } else {
2821                 struct ceph_cap_snap *capsnap =
2822                         list_first_entry(&ci->i_cap_snaps,
2823                                          struct ceph_cap_snap, ci_item);
2824                 snap_follows = capsnap->follows;
2825         }
2826         spin_unlock(&ci->i_ceph_lock);
2827
2828         if (recon_state->msg_version >= 2) {
2829                 int num_fcntl_locks, num_flock_locks;
2830                 struct ceph_filelock *flocks;
2831                 size_t struct_len, total_len = 0;
2832                 u8 struct_v = 0;
2833
2834 encode_again:
2835                 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
2836                 flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
2837                                  sizeof(struct ceph_filelock), GFP_NOFS);
2838                 if (!flocks) {
2839                         err = -ENOMEM;
2840                         goto out_free;
2841                 }
2842                 err = ceph_encode_locks_to_buffer(inode, flocks,
2843                                                   num_fcntl_locks,
2844                                                   num_flock_locks);
2845                 if (err) {
2846                         kfree(flocks);
2847                         if (err == -ENOSPC)
2848                                 goto encode_again;
2849                         goto out_free;
2850                 }
2851
2852                 if (recon_state->msg_version >= 3) {
2853                         /* version, compat_version and struct_len */
2854                         total_len = 2 * sizeof(u8) + sizeof(u32);
2855                         struct_v = 2;
2856                 }
2857                 /*
2858                  * number of encoded locks is stable, so copy to pagelist
2859                  */
2860                 struct_len = 2 * sizeof(u32) +
2861                             (num_fcntl_locks + num_flock_locks) *
2862                             sizeof(struct ceph_filelock);
2863                 rec.v2.flock_len = cpu_to_le32(struct_len);
2864
2865                 struct_len += sizeof(rec.v2);
2866                 struct_len += sizeof(u32) + pathlen;
2867
2868                 if (struct_v >= 2)
2869                         struct_len += sizeof(u64); /* snap_follows */
2870
2871                 total_len += struct_len;
2872                 err = ceph_pagelist_reserve(pagelist, total_len);
2873
2874                 if (!err) {
2875                         if (recon_state->msg_version >= 3) {
2876                                 ceph_pagelist_encode_8(pagelist, struct_v);
2877                                 ceph_pagelist_encode_8(pagelist, 1);
2878                                 ceph_pagelist_encode_32(pagelist, struct_len);
2879                         }
2880                         ceph_pagelist_encode_string(pagelist, path, pathlen);
2881                         ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
2882                         ceph_locks_to_pagelist(flocks, pagelist,
2883                                                num_fcntl_locks,
2884                                                num_flock_locks);
2885                         if (struct_v >= 2)
2886                                 ceph_pagelist_encode_64(pagelist, snap_follows);
2887                 }
2888                 kfree(flocks);
2889         } else {
2890                 size_t size = sizeof(u32) + pathlen + sizeof(rec.v1);
2891                 err = ceph_pagelist_reserve(pagelist, size);
2892                 if (!err) {
2893                         ceph_pagelist_encode_string(pagelist, path, pathlen);
2894                         ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
2895                 }
2896         }
2897
2898         recon_state->nr_caps++;
2899 out_free:
2900         kfree(path);
2901 out_dput:
2902         dput(dentry);
2903         return err;
2904 }
2905
2906
2907 /*
2908  * If an MDS fails and recovers, clients need to reconnect in order to
2909  * reestablish shared state.  This includes all caps issued through
2910  * this session _and_ the snap_realm hierarchy.  Because it's not
2911  * clear which snap realms the mds cares about, we send everything we
2912  * know about.. that ensures we'll then get any new info the
2913  * recovering MDS might have.
2914  *
2915  * This is a relatively heavyweight operation, but it's rare.
2916  *
2917  * called with mdsc->mutex held.
2918  */
2919 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
2920                                struct ceph_mds_session *session)
2921 {
2922         struct ceph_msg *reply;
2923         struct rb_node *p;
2924         int mds = session->s_mds;
2925         int err = -ENOMEM;
2926         int s_nr_caps;
2927         struct ceph_pagelist *pagelist;
2928         struct ceph_reconnect_state recon_state;
2929
2930         pr_info("mds%d reconnect start\n", mds);
2931
2932         pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
2933         if (!pagelist)
2934                 goto fail_nopagelist;
2935         ceph_pagelist_init(pagelist);
2936
2937         reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
2938         if (!reply)
2939                 goto fail_nomsg;
2940
2941         mutex_lock(&session->s_mutex);
2942         session->s_state = CEPH_MDS_SESSION_RECONNECTING;
2943         session->s_seq = 0;
2944
2945         dout("session %p state %s\n", session,
2946              ceph_session_state_name(session->s_state));
2947
2948         spin_lock(&session->s_gen_ttl_lock);
2949         session->s_cap_gen++;
2950         spin_unlock(&session->s_gen_ttl_lock);
2951
2952         spin_lock(&session->s_cap_lock);
2953         /* don't know if session is readonly */
2954         session->s_readonly = 0;
2955         /*
2956          * notify __ceph_remove_cap() that we are composing cap reconnect.
2957          * If a cap gets released before being added to the cap reconnect,
2958          * __ceph_remove_cap() should skip queuing cap release.
2959          */
2960         session->s_cap_reconnect = 1;
2961         /* drop old cap expires; we're about to reestablish that state */
2962         cleanup_cap_releases(mdsc, session);
2963
2964         /* trim unused caps to reduce MDS's cache rejoin time */
2965         if (mdsc->fsc->sb->s_root)
2966                 shrink_dcache_parent(mdsc->fsc->sb->s_root);
2967
2968         ceph_con_close(&session->s_con);
2969         ceph_con_open(&session->s_con,
2970                       CEPH_ENTITY_TYPE_MDS, mds,
2971                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
2972
2973         /* replay unsafe requests */
2974         replay_unsafe_requests(mdsc, session);
2975
2976         down_read(&mdsc->snap_rwsem);
2977
2978         /* traverse this session's caps */
2979         s_nr_caps = session->s_nr_caps;
2980         err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
2981         if (err)
2982                 goto fail;
2983
2984         recon_state.nr_caps = 0;
2985         recon_state.pagelist = pagelist;
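        /* pick the most featureful reconnect encoding the peer supports */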
2986         if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
2987                 recon_state.msg_version = 3;
2988         else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
2989                 recon_state.msg_version = 2;
2990         else
2991                 recon_state.msg_version = 1;
2992         err = iterate_session_caps(session, encode_caps_cb, &recon_state);
2993         if (err < 0)
2994                 goto fail;
2995
2996         spin_lock(&session->s_cap_lock);
2997         session->s_cap_reconnect = 0;
2998         spin_unlock(&session->s_cap_lock);
2999
3000         /*
3001          * snaprealms.  we provide mds with the ino, seq (version), and
3002          * parent for all of our realms.  If the mds has any newer info,
3003          * it will tell us.
3004          */
3005         for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
3006                 struct ceph_snap_realm *realm =
3007                         rb_entry(p, struct ceph_snap_realm, node);
3008                 struct ceph_mds_snaprealm_reconnect sr_rec;
3009
3010                 dout(" adding snap realm %llx seq %lld parent %llx\n",
3011                      realm->ino, realm->seq, realm->parent_ino);
3012                 sr_rec.ino = cpu_to_le64(realm->ino);
3013                 sr_rec.seq = cpu_to_le64(realm->seq);
3014                 sr_rec.parent = cpu_to_le64(realm->parent_ino);
3015                 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
3016                 if (err)
3017                         goto fail;
3018         }
3019
3020         reply->hdr.version = cpu_to_le16(recon_state.msg_version);
3021
3022         /* raced with cap release? fix up encoded cap count */
3023         if (s_nr_caps != recon_state.nr_caps) {
3024                 struct page *page = list_first_entry(&pagelist->head,
3025                                                      struct page, lru);
3026                 __le32 *addr = kmap_atomic(page);
3027                 *addr = cpu_to_le32(recon_state.nr_caps);
3028                 kunmap_atomic(addr);
3029         }
3030
3031         reply->hdr.data_len = cpu_to_le32(pagelist->length);
3032         ceph_msg_data_add_pagelist(reply, pagelist);
3033
3034         ceph_early_kick_flushing_caps(mdsc, session);
3035
3036         ceph_con_send(&session->s_con, reply);
3037
3038         mutex_unlock(&session->s_mutex);
3039
3040         mutex_lock(&mdsc->mutex);
3041         __wake_requests(mdsc, &session->s_waiting);
3042         mutex_unlock(&mdsc->mutex);
3043
3044         up_read(&mdsc->snap_rwsem);
3045         return;
3046
3047 fail:
3048         ceph_msg_put(reply);
3049         up_read(&mdsc->snap_rwsem);
3050         mutex_unlock(&session->s_mutex);
3051 fail_nomsg:
3052         ceph_pagelist_release(pagelist);
3053 fail_nopagelist:
3054         pr_err("error %d preparing reconnect for mds%d\n", err, mds);
3055         return;
3056 }
3057
3058
3059 /*
3060  * compare old and new mdsmaps, kicking requests
3061  * and closing out old connections as necessary
3062  *
3063  * called under mdsc->mutex.
3064  */
3065 static void check_new_map(struct ceph_mds_client *mdsc,
3066                           struct ceph_mdsmap *newmap,
3067                           struct ceph_mdsmap *oldmap)
3068 {
3069         int i;
3070         int oldstate, newstate;
3071         struct ceph_mds_session *s;
3072
3073         dout("check_new_map new %u old %u\n",
3074              newmap->m_epoch, oldmap->m_epoch);
3075
3076         for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
3077                 s = mdsc->sessions[i];
3078                 if (!s)
3079                         continue;
3080                 oldstate = ceph_mdsmap_get_state(oldmap, i);
3081                 newstate = ceph_mdsmap_get_state(newmap, i);
3082
3083                 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
3084                      i, ceph_mds_state_name(oldstate),
3085                      ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
3086                      ceph_mds_state_name(newstate),
3087                      ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
3088                      ceph_session_state_name(s->s_state));
3089
3090                 if (i >= newmap->m_max_mds ||
3091                     memcmp(ceph_mdsmap_get_addr(oldmap, i),
3092                            ceph_mdsmap_get_addr(newmap, i),
3093                            sizeof(struct ceph_entity_addr))) {
3094                         if (s->s_state == CEPH_MDS_SESSION_OPENING) {
3095                                 /* the session never opened, just close it
3096                                  * out now */
3097                                 __wake_requests(mdsc, &s->s_waiting);
3098                                 __unregister_session(mdsc, s);
3099                         } else {
3100                                 /* just close it */
3101                                 mutex_unlock(&mdsc->mutex);
3102                                 mutex_lock(&s->s_mutex);
3103                                 mutex_lock(&mdsc->mutex);
3104                                 ceph_con_close(&s->s_con);
3105                                 mutex_unlock(&s->s_mutex);
3106                                 s->s_state = CEPH_MDS_SESSION_RESTARTING;
3107                         }
3108                 } else if (oldstate == newstate) {
3109                         continue;  /* nothing new with this mds */
3110                 }
3111
3112                 /*
3113                  * send reconnect?
3114                  */
3115                 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
3116                     newstate >= CEPH_MDS_STATE_RECONNECT) {
3117                         mutex_unlock(&mdsc->mutex);
3118                         send_mds_reconnect(mdsc, s);
3119                         mutex_lock(&mdsc->mutex);
3120                 }
3121
3122                 /*
3123                  * kick requests on any mds that has gone active.
3124                  */
3125                 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
3126                     newstate >= CEPH_MDS_STATE_ACTIVE) {
3127                         if (oldstate != CEPH_MDS_STATE_CREATING &&
3128                             oldstate != CEPH_MDS_STATE_STARTING)
3129                                 pr_info("mds%d recovery completed\n", s->s_mds);
3130                         kick_requests(mdsc, i);
3131                         ceph_kick_flushing_caps(mdsc, s);
3132                         wake_up_session_caps(s, 1);
3133                 }
3134         }
3135
3136         for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
3137                 s = mdsc->sessions[i];
3138                 if (!s)
3139                         continue;
3140                 if (!ceph_mdsmap_is_laggy(newmap, i))
3141                         continue;
3142                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
3143                     s->s_state == CEPH_MDS_SESSION_HUNG ||
3144                     s->s_state == CEPH_MDS_SESSION_CLOSING) {
3145                         dout(" connecting to export targets of laggy mds%d\n",
3146                              i);
3147                         __open_export_target_sessions(mdsc, s);
3148                 }
3149         }
3150 }
3151
3152
3153
3154 /*
3155  * leases
3156  */
3157
3158 /*
3159  * caller must hold session s_mutex, dentry->d_lock
3160  */
3161 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
3162 {
3163         struct ceph_dentry_info *di = ceph_dentry(dentry);
3164
3165         ceph_put_mds_session(di->lease_session);
3166         di->lease_session = NULL;
3167 }
3168
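/*
 * Handle a lease message from the MDS: revoke or renew the dentry
 * lease named in the message.  If we can't find the inode or dentry,
 * or the lease is being revoked, ack with CEPH_MDS_LEASE_REVOKE_ACK
 * so the MDS can drop its state.
 */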
3169 static void handle_lease(struct ceph_mds_client *mdsc,
3170                          struct ceph_mds_session *session,
3171                          struct ceph_msg *msg)
3172 {
3173         struct super_block *sb = mdsc->fsc->sb;
3174         struct inode *inode;
3175         struct dentry *parent, *dentry;
3176         struct ceph_dentry_info *di;
3177         int mds = session->s_mds;
3178         struct ceph_mds_lease *h = msg->front.iov_base;
3179         u32 seq;
3180         struct ceph_vino vino;
3181         struct qstr dname;
3182         int release = 0;
3183
3184         dout("handle_lease from mds%d\n", mds);
3185
3186         /* decode */
3187         if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
3188                 goto bad;
3189         vino.ino = le64_to_cpu(h->ino);
3190         vino.snap = CEPH_NOSNAP;
3191         seq = le32_to_cpu(h->seq);
3192         dname.name = (void *)h + sizeof(*h) + sizeof(u32);
3193         dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
3194         if (dname.len != get_unaligned_le32(h+1))
3195                 goto bad;
3196
3197         /* lookup inode */
3198         inode = ceph_find_inode(sb, vino);
3199         dout("handle_lease %s, ino %llx %p %.*s\n",
3200              ceph_lease_op_name(h->action), vino.ino, inode,
3201              dname.len, dname.name);
3202
3203         mutex_lock(&session->s_mutex);
3204         session->s_seq++;
3205
3206         if (!inode) {
3207                 dout("handle_lease no inode %llx\n", vino.ino);
3208                 goto release;
3209         }
3210
3211         /* dentry */
3212         parent = d_find_alias(inode);
3213         if (!parent) {
3214                 dout("no parent dentry on inode %p\n", inode);
3215                 WARN_ON(1);
3216                 goto release;  /* hrm... */
3217         }
3218         dname.hash = full_name_hash(parent, dname.name, dname.len);
3219         dentry = d_lookup(parent, &dname);
3220         dput(parent);
3221         if (!dentry)
3222                 goto release;
3223
3224         spin_lock(&dentry->d_lock);
3225         di = ceph_dentry(dentry);
3226         switch (h->action) {
3227         case CEPH_MDS_LEASE_REVOKE:
3228                 if (di->lease_session == session) {
3229                         if (ceph_seq_cmp(di->lease_seq, seq) > 0)
3230                                 h->seq = cpu_to_le32(di->lease_seq);
3231                         __ceph_mdsc_drop_dentry_lease(dentry);
3232                 }
3233                 release = 1;
3234                 break;
3235
3236         case CEPH_MDS_LEASE_RENEW:
3237                 if (di->lease_session == session &&
3238                     di->lease_gen == session->s_cap_gen &&
3239                     di->lease_renew_from &&
3240                     di->lease_renew_after == 0) {
3241                         unsigned long duration =
3242                                 msecs_to_jiffies(le32_to_cpu(h->duration_ms));
3243
3244                         di->lease_seq = seq;
3245                         di->time = di->lease_renew_from + duration;
3246                         di->lease_renew_after = di->lease_renew_from +
3247                                 (duration >> 1);
3248                         di->lease_renew_from = 0;
3249                 }
3250                 break;
3251         }
3252         spin_unlock(&dentry->d_lock);
3253         dput(dentry);
3254
3255         if (!release)
3256                 goto out;
3257
3258 release:
3259         /* let's just reuse the same message */
3260         h->action = CEPH_MDS_LEASE_REVOKE_ACK;
3261         ceph_msg_get(msg);
3262         ceph_con_send(&session->s_con, msg);
3263
3264 out:
3265         iput(inode);
3266         mutex_unlock(&session->s_mutex);
3267         return;
3268
3269 bad:
3270         pr_err("corrupt lease message\n");
3271         ceph_msg_dump(msg);
3272 }
3273
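/*
 * Build and send a lease message (e.g. a release) for the given
 * dentry to the MDS.
 */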
3274 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
3275                               struct inode *inode,
3276                               struct dentry *dentry, char action,
3277                               u32 seq)
3278 {
3279         struct ceph_msg *msg;
3280         struct ceph_mds_lease *lease;
3281         int len = sizeof(*lease) + sizeof(u32);
3282         int dnamelen = 0;
3283
3284         dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
3285              inode, dentry, ceph_lease_op_name(action), session->s_mds);
3286         dnamelen = dentry->d_name.len;
3287         len += dnamelen;
3288
3289         msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
3290         if (!msg)
3291                 return;
3292         lease = msg->front.iov_base;
3293         lease->action = action;
3294         lease->ino = cpu_to_le64(ceph_vino(inode).ino);
3295         lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
3296         lease->seq = cpu_to_le32(seq);
3297         put_unaligned_le32(dnamelen, lease + 1);
3298         memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);
3299
3300         /*
3301          * if this is a preemptive lease RELEASE, no need to
3302          * flush request stream, since the actual request will
3303          * soon follow.
3304          */
3305         msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
3306
3307         ceph_con_send(&session->s_con, msg);
3308 }
3309
3310 /*
3311  * drop all leases (and dentry refs) in preparation for umount
3312  */
3313 static void drop_leases(struct ceph_mds_client *mdsc)
3314 {
3315         int i;
3316
3317         dout("drop_leases\n");
3318         mutex_lock(&mdsc->mutex);
3319         for (i = 0; i < mdsc->max_sessions; i++) {
3320                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
3321                 if (!s)
3322                         continue;
3323                 mutex_unlock(&mdsc->mutex);
3324                 mutex_lock(&s->s_mutex);
3325                 mutex_unlock(&s->s_mutex);
3326                 ceph_put_mds_session(s);
3327                 mutex_lock(&mdsc->mutex);
3328         }
3329         mutex_unlock(&mdsc->mutex);
3330 }
3331
3332
3333
3334 /*
3335  * delayed work -- periodically trim expired leases, renew caps with mds
3336  */
3337 static void schedule_delayed(struct ceph_mds_client *mdsc)
3338 {
3339         int delay = 5;
3340         unsigned hz = round_jiffies_relative(HZ * delay);
3341         schedule_delayed_work(&mdsc->delayed_work, hz);
3342 }
3343
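/*
 * The delayed work timer: flush delayed caps, renew sessions (or send
 * keepalives), resend pending session close requests, flag sessions
 * that have exceeded their ttl as hung, and send out queued cap
 * releases.  Reschedules itself when done.
 */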
3344 static void delayed_work(struct work_struct *work)
3345 {
3346         int i;
3347         struct ceph_mds_client *mdsc =
3348                 container_of(work, struct ceph_mds_client, delayed_work.work);
3349         int renew_interval;
3350         int renew_caps;
3351
3352         dout("mdsc delayed_work\n");
3353         ceph_check_delayed_caps(mdsc);
3354
3355         mutex_lock(&mdsc->mutex);
3356         renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
3357         renew_caps = time_after_eq(jiffies, HZ*renew_interval +
3358                                    mdsc->last_renew_caps);
3359         if (renew_caps)
3360                 mdsc->last_renew_caps = jiffies;
3361
3362         for (i = 0; i < mdsc->max_sessions; i++) {
3363                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
3364                 if (!s)
3365                         continue;
3366                 if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
3367                         dout("resending session close request for mds%d\n",
3368                              s->s_mds);
3369                         request_close_session(mdsc, s);
3370                         ceph_put_mds_session(s);
3371                         continue;
3372                 }
3373                 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
3374                         if (s->s_state == CEPH_MDS_SESSION_OPEN) {
3375                                 s->s_state = CEPH_MDS_SESSION_HUNG;
3376                                 pr_info("mds%d hung\n", s->s_mds);
3377                         }
3378                 }
3379                 if (s->s_state < CEPH_MDS_SESSION_OPEN) {
3380                         /* this mds is failed or recovering, just wait */
3381                         ceph_put_mds_session(s);
3382                         continue;
3383                 }
3384                 mutex_unlock(&mdsc->mutex);
3385
3386                 mutex_lock(&s->s_mutex);
3387                 if (renew_caps)
3388                         send_renew_caps(mdsc, s);
3389                 else
3390                         ceph_con_keepalive(&s->s_con);
3391                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
3392                     s->s_state == CEPH_MDS_SESSION_HUNG)
3393                         ceph_send_cap_releases(mdsc, s);
3394                 mutex_unlock(&s->s_mutex);
3395                 ceph_put_mds_session(s);
3396
3397                 mutex_lock(&mdsc->mutex);
3398         }
3399         mutex_unlock(&mdsc->mutex);
3400
3401         schedule_delayed(mdsc);
3402 }
3403
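/*
 * Allocate the mds client state for this fs client and initialize
 * its locks, lists, and trees.
 */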
3404 int ceph_mdsc_init(struct ceph_fs_client *fsc)
3406 {
3407         struct ceph_mds_client *mdsc;
3408
3409         mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
3410         if (!mdsc)
3411                 return -ENOMEM;
3412         mdsc->fsc = fsc;
3413         fsc->mdsc = mdsc;
3414         mutex_init(&mdsc->mutex);
3415         mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
3416         if (!mdsc->mdsmap) {
3417                 kfree(mdsc);
3418                 return -ENOMEM;
3419         }
3420
3421         init_completion(&mdsc->safe_umount_waiters);
3422         init_waitqueue_head(&mdsc->session_close_wq);
3423         INIT_LIST_HEAD(&mdsc->waiting_for_map);
3424         mdsc->sessions = NULL;
3425         atomic_set(&mdsc->num_sessions, 0);
3426         mdsc->max_sessions = 0;
3427         mdsc->stopping = 0;
3428         mdsc->last_snap_seq = 0;
3429         init_rwsem(&mdsc->snap_rwsem);
3430         mdsc->snap_realms = RB_ROOT;
3431         INIT_LIST_HEAD(&mdsc->snap_empty);
3432         spin_lock_init(&mdsc->snap_empty_lock);
3433         mdsc->last_tid = 0;
3434         mdsc->oldest_tid = 0;
3435         mdsc->request_tree = RB_ROOT;
3436         INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
3437         mdsc->last_renew_caps = jiffies;
3438         INIT_LIST_HEAD(&mdsc->cap_delay_list);
3439         spin_lock_init(&mdsc->cap_delay_lock);
3440         INIT_LIST_HEAD(&mdsc->snap_flush_list);
3441         spin_lock_init(&mdsc->snap_flush_lock);
3442         mdsc->last_cap_flush_tid = 1;
3443         INIT_LIST_HEAD(&mdsc->cap_flush_list);
3444         INIT_LIST_HEAD(&mdsc->cap_dirty);
3445         INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
3446         mdsc->num_cap_flushing = 0;
3447         spin_lock_init(&mdsc->cap_dirty_lock);
3448         init_waitqueue_head(&mdsc->cap_flushing_wq);
3449         spin_lock_init(&mdsc->dentry_lru_lock);
3450         INIT_LIST_HEAD(&mdsc->dentry_lru);
3451
3452         ceph_caps_init(mdsc);
3453         ceph_adjust_min_caps(mdsc, fsc->min_caps);
3454
3455         init_rwsem(&mdsc->pool_perm_rwsem);
3456         mdsc->pool_perm_tree = RB_ROOT;
3457
3458         return 0;
3459 }
3460
3461 /*
3462  * Wait for safe replies on open mds requests.  If we time out, drop
3463  * all requests from the tree to avoid dangling dentry refs.
3464  */
3465 static void wait_requests(struct ceph_mds_client *mdsc)
3466 {
3467         struct ceph_options *opts = mdsc->fsc->client->options;
3468         struct ceph_mds_request *req;
3469
3470         mutex_lock(&mdsc->mutex);
3471         if (__get_oldest_req(mdsc)) {
3472                 mutex_unlock(&mdsc->mutex);
3473
3474                 dout("wait_requests waiting for requests\n");
3475                 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
3476                                     ceph_timeout_jiffies(opts->mount_timeout));
3477
3478                 /* tear down remaining requests */
3479                 mutex_lock(&mdsc->mutex);
3480                 while ((req = __get_oldest_req(mdsc))) {
3481                         dout("wait_requests timed out on tid %llu\n",
3482                              req->r_tid);
3483                         __unregister_request(mdsc, req);
3484                 }
3485         }
3486         mutex_unlock(&mdsc->mutex);
3487         dout("wait_requests done\n");
3488 }
3489
3490 /*
3491  * called before mount is ro, and before dentries are torn down.
3492  * (hmm, does this still race with new lookups?)
3493  */
3494 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
3495 {
3496         dout("pre_umount\n");
3497         mdsc->stopping = 1;
3498
3499         drop_leases(mdsc);
3500         ceph_flush_dirty_caps(mdsc);
3501         wait_requests(mdsc);
3502
3503         /*
3504          * wait for reply handlers to drop their request refs and
3505          * their inode/dcache refs
3506          */
3507         ceph_msgr_flush();
3508 }
3509
3510 /*
3511  * wait for all write mds requests to flush.
3512  */
3513 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
3514 {
3515         struct ceph_mds_request *req = NULL, *nextreq;
3516         struct rb_node *n;
3517
3518         mutex_lock(&mdsc->mutex);
3519         dout("wait_unsafe_requests want %lld\n", want_tid);
3520 restart:
3521         req = __get_oldest_req(mdsc);
3522         while (req && req->r_tid <= want_tid) {
3523                 /* find next request */
3524                 n = rb_next(&req->r_node);
3525                 if (n)
3526                         nextreq = rb_entry(n, struct ceph_mds_request, r_node);
3527                 else
3528                         nextreq = NULL;
3529                 if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
3530                     (req->r_op & CEPH_MDS_OP_WRITE)) {
3531                         /* write op */
3532                         ceph_mdsc_get_request(req);
3533                         if (nextreq)
3534                                 ceph_mdsc_get_request(nextreq);
3535                         mutex_unlock(&mdsc->mutex);
3536                         dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
3537                              req->r_tid, want_tid);
3538                         wait_for_completion(&req->r_safe_completion);
3539                         mutex_lock(&mdsc->mutex);
3540                         ceph_mdsc_put_request(req);
3541                         if (!nextreq)
3542                                 break;  /* next didn't exist before, so we're done! */
3543                         if (RB_EMPTY_NODE(&nextreq->r_node)) {
3544                                 /* next request was removed from tree */
3545                                 ceph_mdsc_put_request(nextreq);
3546                                 goto restart;
3547                         }
3548                         ceph_mdsc_put_request(nextreq);  /* won't go away */
3549                 }
3550                 req = nextreq;
3551         }
3552         mutex_unlock(&mdsc->mutex);
3553         dout("wait_unsafe_requests done\n");
3554 }
3555
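/*
 * Flush dirty caps and wait for (1) all write MDS requests up to the
 * current tid to be safely committed and (2) all cap flushes to be
 * acked.
 */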
3556 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
3557 {
3558         u64 want_tid, want_flush;
3559
3560         if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
3561                 return;
3562
3563         dout("sync\n");
3564         mutex_lock(&mdsc->mutex);
3565         want_tid = mdsc->last_tid;
3566         mutex_unlock(&mdsc->mutex);
3567
3568         ceph_flush_dirty_caps(mdsc);
3569         spin_lock(&mdsc->cap_dirty_lock);
3570         want_flush = mdsc->last_cap_flush_tid;
3571         if (!list_empty(&mdsc->cap_flush_list)) {
3572                 struct ceph_cap_flush *cf =
3573                         list_last_entry(&mdsc->cap_flush_list,
3574                                         struct ceph_cap_flush, g_list);
3575                 cf->wake = true;
3576         }
3577         spin_unlock(&mdsc->cap_dirty_lock);
3578
3579         dout("sync want tid %lld flush_seq %lld\n",
3580              want_tid, want_flush);
3581
3582         wait_unsafe_requests(mdsc, want_tid);
3583         wait_caps_flush(mdsc, want_flush);
3584 }
3585
3586 /*
3587  * true if all sessions are closed, or we force unmount
3588  */
3589 static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
3590 {
3591         if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
3592                 return true;
3593         return atomic_read(&mdsc->num_sessions) <= skipped;
3594 }
3595
3596 /*
3597  * called after sb is ro.
3598  */
3599 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
3600 {
3601         struct ceph_options *opts = mdsc->fsc->client->options;
3602         struct ceph_mds_session *session;
3603         int i;
3604         int skipped = 0;
3605
3606         dout("close_sessions\n");
3607
3608         /* close sessions */
3609         mutex_lock(&mdsc->mutex);
3610         for (i = 0; i < mdsc->max_sessions; i++) {
3611                 session = __ceph_lookup_mds_session(mdsc, i);
3612                 if (!session)
3613                         continue;
3614                 mutex_unlock(&mdsc->mutex);
3615                 mutex_lock(&session->s_mutex);
3616                 if (__close_session(mdsc, session) <= 0)
3617                         skipped++;
3618                 mutex_unlock(&session->s_mutex);
3619                 ceph_put_mds_session(session);
3620                 mutex_lock(&mdsc->mutex);
3621         }
3622         mutex_unlock(&mdsc->mutex);
3623
3624         dout("waiting for sessions to close\n");
3625         wait_event_timeout(mdsc->session_close_wq,
3626                            done_closing_sessions(mdsc, skipped),
3627                            ceph_timeout_jiffies(opts->mount_timeout));
3628
3629         /* tear down remaining sessions */
3630         mutex_lock(&mdsc->mutex);
3631         for (i = 0; i < mdsc->max_sessions; i++) {
3632                 if (mdsc->sessions[i]) {
3633                         session = get_session(mdsc->sessions[i]);
3634                         __unregister_session(mdsc, session);
3635                         mutex_unlock(&mdsc->mutex);
3636                         mutex_lock(&session->s_mutex);
3637                         remove_session_caps(session);
3638                         mutex_unlock(&session->s_mutex);
3639                         ceph_put_mds_session(session);
3640                         mutex_lock(&mdsc->mutex);
3641                 }
3642         }
3643         WARN_ON(!list_empty(&mdsc->cap_delay_list));
3644         mutex_unlock(&mdsc->mutex);
3645
3646         ceph_cleanup_empty_realms(mdsc);
3647
3648         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3649
3650         dout("stopped\n");
3651 }
3652
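/*
 * Forcibly close all sessions, discarding their requests and caps,
 * and wake anyone waiting for a map so a forced unmount can proceed.
 */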
3653 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
3654 {
3655         struct ceph_mds_session *session;
3656         int mds;
3657
3658         dout("force umount\n");
3659
3660         mutex_lock(&mdsc->mutex);
3661         for (mds = 0; mds < mdsc->max_sessions; mds++) {
3662                 session = __ceph_lookup_mds_session(mdsc, mds);
3663                 if (!session)
3664                         continue;
3665                 mutex_unlock(&mdsc->mutex);
3666                 mutex_lock(&session->s_mutex);
3667                 __close_session(mdsc, session);
3668                 if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
3669                         cleanup_session_requests(mdsc, session);
3670                         remove_session_caps(session);
3671                 }
3672                 mutex_unlock(&session->s_mutex);
3673                 ceph_put_mds_session(session);
3674                 mutex_lock(&mdsc->mutex);
3675                 kick_requests(mdsc, mds);
3676         }
3677         __wake_requests(mdsc, &mdsc->waiting_for_map);
3678         mutex_unlock(&mdsc->mutex);
3679 }
3680
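/*
 * Final teardown: cancel the delayed work and free the mdsmap,
 * session array, and cap/pool permission state.
 */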
3681 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
3682 {
3683         dout("stop\n");
3684         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3685         if (mdsc->mdsmap)
3686                 ceph_mdsmap_destroy(mdsc->mdsmap);
3687         kfree(mdsc->sessions);
3688         ceph_caps_finalize(mdsc);
3689         ceph_pool_perm_destroy(mdsc);
3690 }
3691
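/*
 * Destroy the mds client for this fs client, first flushing any
 * remaining messenger work that references it.
 */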
3692 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
3693 {
3694         struct ceph_mds_client *mdsc = fsc->mdsc;
3695
3696         dout("mdsc_destroy %p\n", mdsc);
3697         ceph_mdsc_stop(mdsc);
3698
3699         /* flush out any connection work with references to us */
3700         ceph_msgr_flush();
3701
3702         fsc->mdsc = NULL;
3703         kfree(mdsc);
3704         dout("mdsc_destroy %p done\n", mdsc);
3705 }
3706
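/*
 * Handle an fsmap reply: look up the file system named by the
 * mds_namespace mount option, and if found subscribe to its mdsmap.
 * Otherwise record the error so waiting requests fail.
 */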
3707 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3708 {
3709         struct ceph_fs_client *fsc = mdsc->fsc;
3710         const char *mds_namespace = fsc->mount_options->mds_namespace;
3711         void *p = msg->front.iov_base;
3712         void *end = p + msg->front.iov_len;
3713         u32 epoch;
3714         u32 map_len;
3715         u32 num_fs;
3716         u32 mount_fscid = (u32)-1;
3717         u8 struct_v, struct_cv;
3718         int err = -EINVAL;
3719
3720         ceph_decode_need(&p, end, sizeof(u32), bad);
3721         epoch = ceph_decode_32(&p);
3722
3723         dout("handle_fsmap epoch %u\n", epoch);
3724
3725         ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
3726         struct_v = ceph_decode_8(&p);
3727         struct_cv = ceph_decode_8(&p);
3728         map_len = ceph_decode_32(&p);
3729
3730         ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
3731         p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */
3732
3733         num_fs = ceph_decode_32(&p);
3734         while (num_fs-- > 0) {
3735                 void *info_p, *info_end;
3736                 u32 info_len;
3737                 u8 info_v, info_cv;
3738                 u32 fscid, namelen;
3739
3740                 ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
3741                 info_v = ceph_decode_8(&p);
3742                 info_cv = ceph_decode_8(&p);
3743                 info_len = ceph_decode_32(&p);
3744                 ceph_decode_need(&p, end, info_len, bad);
3745                 info_p = p;
3746                 info_end = p + info_len;
3747                 p = info_end;
3748
3749                 ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
3750                 fscid = ceph_decode_32(&info_p);
3751                 namelen = ceph_decode_32(&info_p);
3752                 ceph_decode_need(&info_p, info_end, namelen, bad);
3753
3754                 if (mds_namespace &&
3755                     strlen(mds_namespace) == namelen &&
3756                     !strncmp(mds_namespace, (char *)info_p, namelen)) {
3757                         mount_fscid = fscid;
3758                         break;
3759                 }
3760         }
3761
3762         ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
3763         if (mount_fscid != (u32)-1) {
3764                 fsc->client->monc.fs_cluster_id = mount_fscid;
3765                 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
3766                                    0, true);
3767                 ceph_monc_renew_subs(&fsc->client->monc);
3768         } else {
3769                 err = -ENOENT;
3770                 goto err_out;
3771         }
3772         return;
3773 bad:
3774         pr_err("error decoding fsmap\n");
3775 err_out:
3776         mutex_lock(&mdsc->mutex);
3777         mdsc->mdsmap_err = err;
3778         __wake_requests(mdsc, &mdsc->waiting_for_map);
3779         mutex_unlock(&mdsc->mutex);
3780         return;
3781 }
3782
3783 /*
3784  * handle mds map update.
3785  */
3786 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3787 {
3788         u32 epoch;
3789         u32 maplen;
3790         void *p = msg->front.iov_base;
3791         void *end = p + msg->front.iov_len;
3792         struct ceph_mdsmap *newmap, *oldmap;
3793         struct ceph_fsid fsid;
3794         int err = -EINVAL;
3795
3796         ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
3797         ceph_decode_copy(&p, &fsid, sizeof(fsid));
3798         if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
3799                 return;
3800         epoch = ceph_decode_32(&p);
3801         maplen = ceph_decode_32(&p);
3802         dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
3803
3804         /* do we need it? */
3805         mutex_lock(&mdsc->mutex);
3806         if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
3807                 dout("handle_map epoch %u <= our %u\n",
3808                      epoch, mdsc->mdsmap->m_epoch);
3809                 mutex_unlock(&mdsc->mutex);
3810                 return;
3811         }
3812
3813         newmap = ceph_mdsmap_decode(&p, end);
3814         if (IS_ERR(newmap)) {
3815                 err = PTR_ERR(newmap);
3816                 goto bad_unlock;
3817         }
3818
3819         /* swap into place */
3820         if (mdsc->mdsmap) {
3821                 oldmap = mdsc->mdsmap;
3822                 mdsc->mdsmap = newmap;
3823                 check_new_map(mdsc, newmap, oldmap);
3824                 ceph_mdsmap_destroy(oldmap);
3825         } else {
3826                 mdsc->mdsmap = newmap;  /* first mds map */
3827         }
3828         mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
3829
3830         __wake_requests(mdsc, &mdsc->waiting_for_map);
3831         ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
3832                           mdsc->mdsmap->m_epoch);
3833
3834         mutex_unlock(&mdsc->mutex);
3835         schedule_delayed(mdsc);
3836         return;
3837
3838 bad_unlock:
3839         mutex_unlock(&mdsc->mutex);
3840 bad:
3841         pr_err("error decoding mdsmap %d\n", err);
3842         return;
3843 }
3844
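/*
 * Messenger reference counting: a connection pins its session while
 * in use.
 */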
3845 static struct ceph_connection *con_get(struct ceph_connection *con)
3846 {
3847         struct ceph_mds_session *s = con->private;
3848
3849         if (get_session(s)) {
3850                 dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
3851                 return con;
3852         }
3853         dout("mdsc con_get %p FAIL\n", s);
3854         return NULL;
3855 }
3856
3857 static void con_put(struct ceph_connection *con)
3858 {
3859         struct ceph_mds_session *s = con->private;
3860
3861         dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
3862         ceph_put_mds_session(s);
3863 }
3864
3865 /*
3866  * if the client is unresponsive for long enough, the mds will kill
3867  * the session entirely.
3868  */
3869 static void peer_reset(struct ceph_connection *con)
3870 {
3871         struct ceph_mds_session *s = con->private;
3872         struct ceph_mds_client *mdsc = s->s_mdsc;
3873
3874         pr_warn("mds%d closed our session\n", s->s_mds);
3875         send_mds_reconnect(mdsc, s);
3876 }
3877
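/*
 * Dispatch an incoming message to the appropriate handler, after
 * checking that the session is still registered.
 */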
3878 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
3879 {
3880         struct ceph_mds_session *s = con->private;
3881         struct ceph_mds_client *mdsc = s->s_mdsc;
3882         int type = le16_to_cpu(msg->hdr.type);
3883
3884         mutex_lock(&mdsc->mutex);
3885         if (__verify_registered_session(mdsc, s) < 0) {
3886                 mutex_unlock(&mdsc->mutex);
3887                 goto out;
3888         }
3889         mutex_unlock(&mdsc->mutex);
3890
3891         switch (type) {
3892         case CEPH_MSG_MDS_MAP:
3893                 ceph_mdsc_handle_mdsmap(mdsc, msg);
3894                 break;
3895         case CEPH_MSG_FS_MAP_USER:
3896                 ceph_mdsc_handle_fsmap(mdsc, msg);
3897                 break;
3898         case CEPH_MSG_CLIENT_SESSION:
3899                 handle_session(s, msg);
3900                 break;
3901         case CEPH_MSG_CLIENT_REPLY:
3902                 handle_reply(s, msg);
3903                 break;
3904         case CEPH_MSG_CLIENT_REQUEST_FORWARD:
3905                 handle_forward(mdsc, s, msg);
3906                 break;
3907         case CEPH_MSG_CLIENT_CAPS:
3908                 ceph_handle_caps(s, msg);
3909                 break;
3910         case CEPH_MSG_CLIENT_SNAP:
3911                 ceph_handle_snap(mdsc, s, msg);
3912                 break;
3913         case CEPH_MSG_CLIENT_LEASE:
3914                 handle_lease(mdsc, s, msg);
3915                 break;
3916
3917         default:
3918                 pr_err("received unknown message type %d %s\n", type,
3919                        ceph_msg_type_name(type));
3920         }
3921 out:
3922         ceph_msg_put(msg);
3923 }
3924
3925 /*
3926  * authentication
3927  */
3928
3929 /*
3930  * Note: returned pointer is the address of a structure that's
3931  * managed separately.  Caller must *not* attempt to free it.
3932  */
3933 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
3934                                         int *proto, int force_new)
3935 {
3936         struct ceph_mds_session *s = con->private;
3937         struct ceph_mds_client *mdsc = s->s_mdsc;
3938         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3939         struct ceph_auth_handshake *auth = &s->s_auth;
3940
3941         if (force_new && auth->authorizer) {
3942                 ceph_auth_destroy_authorizer(auth->authorizer);
3943                 auth->authorizer = NULL;
3944         }
3945         if (!auth->authorizer) {
3946                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
3947                                                       auth);
3948                 if (ret)
3949                         return ERR_PTR(ret);
3950         } else {
3951                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
3952                                                       auth);
3953                 if (ret)
3954                         return ERR_PTR(ret);
3955         }
3956         *proto = ac->protocol;
3957
3958         return auth;
3959 }
3960
3961
3962 static int verify_authorizer_reply(struct ceph_connection *con)
3963 {
3964         struct ceph_mds_session *s = con->private;
3965         struct ceph_mds_client *mdsc = s->s_mdsc;
3966         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3967
3968         return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer);
3969 }
3970
3971 static int invalidate_authorizer(struct ceph_connection *con)
3972 {
3973         struct ceph_mds_session *s = con->private;
3974         struct ceph_mds_client *mdsc = s->s_mdsc;
3975         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3976
3977         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
3978
3979         return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
3980 }
3981
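/*
 * Allocate space for an incoming message based on its header.
 */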
3982 static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
3983                                 struct ceph_msg_header *hdr, int *skip)
3984 {
3985         struct ceph_msg *msg;
3986         int type = (int) le16_to_cpu(hdr->type);
3987         int front_len = (int) le32_to_cpu(hdr->front_len);
3988
3989         if (con->in_msg)
3990                 return con->in_msg;
3991
3992         *skip = 0;
3993         msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
3994         if (!msg) {
3995                 pr_err("unable to allocate msg type %d len %d\n",
3996                        type, front_len);
3997                 return NULL;
3998         }
3999
4000         return msg;
4001 }
4002
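/*
 * Sign outgoing messages and verify incoming signatures using the
 * session's authorizer state.
 */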
4003 static int mds_sign_message(struct ceph_msg *msg)
4004 {
4005         struct ceph_mds_session *s = msg->con->private;
4006         struct ceph_auth_handshake *auth = &s->s_auth;
4007
4008         return ceph_auth_sign_message(auth, msg);
4009 }
4010
4011 static int mds_check_message_signature(struct ceph_msg *msg)
4012 {
4013         struct ceph_mds_session *s = msg->con->private;
4014         struct ceph_auth_handshake *auth = &s->s_auth;
4015
4016         return ceph_auth_check_message_signature(auth, msg);
4017 }
4018
4019 static const struct ceph_connection_operations mds_con_ops = {
4020         .get = con_get,
4021         .put = con_put,
4022         .dispatch = dispatch,
4023         .get_authorizer = get_authorizer,
4024         .verify_authorizer_reply = verify_authorizer_reply,
4025         .invalidate_authorizer = invalidate_authorizer,
4026         .peer_reset = peer_reset,
4027         .alloc_msg = mds_alloc_msg,
4028         .sign_message = mds_sign_message,
4029         .check_message_signature = mds_check_message_signature,
4030 };
4031
4032 /* eof */