/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015 Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/libcfs/libcfs.h>
# ifdef __mips64__
#  include <linux/kernel.h>
# endif

#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
#include "ptlrpc_internal.h"

struct lnet_handle_eq ptlrpc_eq_h;

/*
 *  Client's outgoing request callback
 */
void request_out_callback(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_request *req = cbid->cbid_arg;
	bool wakeup = false;

	LASSERT(ev->type == LNET_EVENT_SEND || ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->unlinked);

	DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

	sptlrpc_request_out_callback(req);

	spin_lock(&req->rq_lock);
	req->rq_real_sent = ktime_get_real_seconds();
	req->rq_req_unlinked = 1;
	/* reply_in_callback happened before request_out_callback? */
	if (req->rq_reply_unlinked)
		wakeup = true;

	if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
		/* Failed send: make it seem like the reply timed out, just
		 * like failing sends in client.c do currently...
		 */
		req->rq_net_err = 1;
		wakeup = true;
	}

	if (wakeup)
		ptlrpc_client_wake_req(req);

	spin_unlock(&req->rq_lock);

	ptlrpc_req_finished(req);
}

/*
 * Client's incoming reply callback
 */
void reply_in_callback(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_request *req = cbid->cbid_arg;

	DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

	LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->md.start == req->rq_repbuf);
	LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
	/* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
	 * for adaptive timeouts' early reply.
	 */
	LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);

	spin_lock(&req->rq_lock);

	req->rq_receiving_reply = 0;
	req->rq_early = 0;
	if (ev->unlinked)
		req->rq_reply_unlinked = 1;

	if (ev->status)
		goto out_wake;

	if (ev->type == LNET_EVENT_UNLINK) {
		LASSERT(ev->unlinked);
		DEBUG_REQ(D_NET, req, "unlink");
		goto out_wake;
	}

	if (ev->mlength < ev->rlength) {
		CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
		       req->rq_replen, ev->rlength, ev->offset);
		req->rq_reply_truncated = 1;
		req->rq_replied = 1;
		req->rq_status = -EOVERFLOW;
		req->rq_nob_received = ev->rlength + ev->offset;
		goto out_wake;
	}

	if ((ev->offset == 0) &&
	    ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
		/* Early reply */
		DEBUG_REQ(D_ADAPTTO, req,
			  "Early reply received: mlen=%u offset=%d replen=%d replied=%d unlinked=%d",
			  ev->mlength, ev->offset,
			  req->rq_replen, req->rq_replied, ev->unlinked);

		req->rq_early_count++; /* number received, client side */

		/* already got the real reply or buffers are already unlinked */
		if (req->rq_replied || req->rq_reply_unlinked == 1)
			goto out_wake;

		req->rq_early = 1;
		req->rq_reply_off = ev->offset;
		req->rq_nob_received = ev->mlength;
		/* And we're still receiving */
		req->rq_receiving_reply = 1;
	} else {
		/* Real reply */
		req->rq_rep_swab_mask = 0;
		req->rq_replied = 1;
		/* Got reply, no resend required */
		req->rq_resend = 0;
		req->rq_reply_off = ev->offset;
		req->rq_nob_received = ev->mlength;
		/* LNetMDUnlink can't be called under the LNET_LOCK,
		 * so we must unlink in ptlrpc_unregister_reply
		 */
		DEBUG_REQ(D_INFO, req,
			  "reply in flags=%x mlen=%u offset=%d replen=%d",
			  lustre_msg_get_flags(req->rq_reqmsg),
			  ev->mlength, ev->offset, req->rq_replen);
	}

	req->rq_import->imp_last_reply_time = ktime_get_real_seconds();

out_wake:
	/* NB don't unlock till after wakeup; req can disappear under us
	 * since we don't have our own ref
	 */
	ptlrpc_client_wake_req(req);
	spin_unlock(&req->rq_lock);
}

/*
 * Client's bulk has been written/read
 */
void client_bulk_callback(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
	struct ptlrpc_request *req;

	LASSERT((ptlrpc_is_bulk_put_sink(desc->bd_type) &&
		 ev->type == LNET_EVENT_PUT) ||
		(ptlrpc_is_bulk_get_source(desc->bd_type) &&
		 ev->type == LNET_EVENT_GET) ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->unlinked);

	if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
		ev->status = -EIO;

	if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2,
				 CFS_FAIL_ONCE))
		ev->status = -EIO;

	CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
	       "event type %d, status %d, desc %p\n",
	       ev->type, ev->status, desc);

	spin_lock(&desc->bd_lock);
	req = desc->bd_req;
	LASSERT(desc->bd_md_count > 0);
	desc->bd_md_count--;

	if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
		desc->bd_nob_transferred += ev->mlength;
		desc->bd_sender = ev->sender;
	} else {
		/* start reconnect and resend if network error hit */
		spin_lock(&req->rq_lock);
		req->rq_net_err = 1;
		spin_unlock(&req->rq_lock);
	}

	if (ev->status != 0)
		desc->bd_failure = 1;

	/* NB don't unlock till after wakeup; desc can disappear under us
	 * otherwise
	 */
	if (desc->bd_md_count == 0)
		ptlrpc_client_wake_req(desc->bd_req);

	spin_unlock(&desc->bd_lock);
}
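
/*
 * Editor's note: a minimal compiled-out sketch of the bd_md_count gating
 * used by client_bulk_callback() above.  Each bulk MD posted on the wire
 * is counted; every network event retires one MD, and the waiter is only
 * woken once the last MD has reported in, since the descriptor may
 * disappear as soon as the request is woken.  All names below
 * (bulk_model, bulk_event_model) are hypothetical and exist only for
 * illustration; this is not driver code.
 */
#if 0	/* illustrative sketch, not built */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct bulk_model {
	int md_count;		/* MDs still outstanding on the network */
	bool failure;		/* some event reported an error */
};

/* Mirror of the event path: retire one MD, wake only on the last one. */
static bool bulk_event_model(struct bulk_model *b, int status)
{
	assert(b->md_count > 0);
	b->md_count--;
	if (status != 0)
		b->failure = true;
	return b->md_count == 0;	/* true == wake the waiting request */
}

int main(void)
{
	struct bulk_model b = { .md_count = 3, .failure = false };

	/* Two healthy events do not wake the waiter... */
	printf("wake=%d\n", bulk_event_model(&b, 0));		/* wake=0 */
	printf("wake=%d\n", bulk_event_model(&b, 0));		/* wake=0 */
	/* ...the final event does, even when it carries an error. */
	printf("wake=%d fail=%d\n", bulk_event_model(&b, -5), b.failure);
	return 0;
}
#endif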

/*
 * We will have percpt request history list for ptlrpc service in upcoming
 * patches because we don't want to be serialized by current per-service
 * history operations. So we require that a history ID can (somehow) show
 * arrival order without grabbing a global lock, and that users can sort
 * them in userspace.
 *
 * This is how we generate a history ID for ptlrpc_request:
 * ----------------------------------------------------
 * |  32 bits  |  16 bits  | (16 - X)bits  |  X bits  |
 * ----------------------------------------------------
 * |  seconds  | usec / 16 |   sequence    | CPT id   |
 * ----------------------------------------------------
 *
 * It might not be precise, but it should be good enough.
 */

#define REQS_CPT_BITS(svcpt)	((svcpt)->scp_service->srv_cpt_bits)

#define REQS_SEC_SHIFT		32
#define REQS_USEC_SHIFT		16
#define REQS_SEQ_SHIFT(svcpt)	REQS_CPT_BITS(svcpt)

static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
				   struct ptlrpc_request *req)
{
	__u64 sec = req->rq_arrival_time.tv_sec;
	__u32 usec = req->rq_arrival_time.tv_nsec / NSEC_PER_USEC / 16; /* usec / 16 */
	__u64 new_seq;

	/* set the sequence ID for the request and add it to the history
	 * list; this must be called while holding svcpt::scp_lock
	 */

	new_seq = (sec << REQS_SEC_SHIFT) |
		  (usec << REQS_USEC_SHIFT) |
		  (svcpt->scp_cpt < 0 ? 0 : svcpt->scp_cpt);

	if (new_seq > svcpt->scp_hist_seq) {
		/* This handles the initial case of scp_hist_seq == 0 or
		 * we just jumped into a new time window
		 */
		svcpt->scp_hist_seq = new_seq;
	} else {
		LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
		/* NB: increase the sequence number in the current usec
		 * bucket; however, it's possible that we used up all bits
		 * for sequence and jumped into the next usec bucket (future
		 * time), then we hope there will be fewer RPCs per bucket
		 * at some point, and the sequence will catch up again
		 */
		svcpt->scp_hist_seq += (1ULL << REQS_SEQ_SHIFT(svcpt));
		new_seq = svcpt->scp_hist_seq;
	}

	req->rq_history_seq = new_seq;

	list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
}
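
/*
 * Editor's note: a compiled-out worked example of the history-ID packing
 * implemented above.  With hypothetical inputs -- arrival time of
 * 1000000000 s + 500000 us, CPT id 2, and 4 CPT bits -- the ID carries
 * the seconds in the top 32 bits, usec/16 in the next 16 bits, and the
 * CPT id in the low bits, leaving the bits in between for the sequence
 * counter that is bumped on same-bucket collisions.  The EX_* names are
 * local stand-ins for the REQS_* macros; this is not driver code.
 */
#if 0	/* illustrative sketch, not built */
#include <inttypes.h>
#include <stdio.h>

#define EX_SEC_SHIFT	32	/* mirrors REQS_SEC_SHIFT */
#define EX_USEC_SHIFT	16	/* mirrors REQS_USEC_SHIFT */
#define EX_CPT_BITS	4	/* hypothetical srv_cpt_bits */

int main(void)
{
	uint64_t sec = 1000000000ULL;	/* hypothetical arrival seconds */
	uint32_t usec = 500000 / 16;	/* usec / 16, as in the code above */
	uint32_t cpt = 2;		/* hypothetical CPT id */
	uint64_t seq;

	seq = (sec << EX_SEC_SHIFT) |
	      ((uint64_t)usec << EX_USEC_SHIFT) |
	      cpt;
	printf("history id = %#" PRIx64 "\n", seq);

	/* A collision in the same usec bucket bumps the sequence field. */
	seq += 1ULL << EX_CPT_BITS;	/* mirrors REQS_SEQ_SHIFT(svcpt) */
	printf("collision  = %#" PRIx64 "\n", seq);
	return 0;
}
#endif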

/*
 * Server's incoming request callback
 */
void request_in_callback(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
	struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
	struct ptlrpc_service *service = svcpt->scp_service;
	struct ptlrpc_request *req;

	LASSERT(ev->type == LNET_EVENT_PUT ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer);
	LASSERT((char *)ev->md.start + ev->offset + ev->mlength <=
		rqbd->rqbd_buffer + service->srv_buf_size);

	CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
	       "event type %d, status %d, service %s\n",
	       ev->type, ev->status, service->srv_name);

	if (ev->unlinked) {
		/* If this is the last request message to fit in the
		 * request buffer we can use the request object embedded in
		 * rqbd.  Note that if we failed to allocate a request,
		 * we'd have to re-post the rqbd, which we can't do in this
		 * context.
		 */
		req = &rqbd->rqbd_req;
		memset(req, 0, sizeof(*req));
	} else {
		LASSERT(ev->type == LNET_EVENT_PUT);
		if (ev->status != 0) {
			/* We moaned above already... */
			return;
		}
		req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
		if (!req) {
			CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n",
			       service->srv_name,
			       libcfs_id2str(ev->initiator));
			return;
		}
	}

	ptlrpc_srv_req_init(req);
	/* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
	 * flags are reset and scalars are zero.  We only set the message
	 * size to non-zero if this was a successful receive.
	 */
	req->rq_xid = ev->match_bits;
	req->rq_reqbuf = ev->md.start + ev->offset;
	if (ev->type == LNET_EVENT_PUT && ev->status == 0)
		req->rq_reqdata_len = ev->mlength;
	ktime_get_real_ts64(&req->rq_arrival_time);
	req->rq_peer = ev->initiator;
	req->rq_self = ev->target.nid;
	req->rq_rqbd = rqbd;
	req->rq_phase = RQ_PHASE_NEW;
	if (ev->type == LNET_EVENT_PUT)
		CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n",
		       req, req->rq_xid, ev->mlength);

	CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));

	spin_lock(&svcpt->scp_lock);

	ptlrpc_req_add_history(svcpt, req);

	if (ev->unlinked) {
		svcpt->scp_nrqbds_posted--;
		CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
		       svcpt->scp_nrqbds_posted);

		/* Normally, don't complain about 0 buffers posted; LNET won't
		 * drop incoming reqs since we set the portal lazy
		 */
		if (test_req_buffer_pressure &&
		    ev->type != LNET_EVENT_UNLINK &&
		    svcpt->scp_nrqbds_posted == 0)
			CWARN("All %s request buffers busy\n",
			      service->srv_name);

		/* req takes over the network's ref on rqbd */
	} else {
		/* req takes a ref on rqbd */
		rqbd->rqbd_refcount++;
	}

	list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
	svcpt->scp_nreqs_incoming++;

	/* NB everything can disappear under us once the request
	 * has been queued and we unlock, so do the wake now...
	 */
	wake_up(&svcpt->scp_waitq);

	spin_unlock(&svcpt->scp_lock);
}
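
/*
 * Editor's note: a compiled-out model of the embedded-vs-allocated request
 * choice in request_in_callback() above.  The last message to land in a
 * buffer reuses the request object embedded in the buffer descriptor,
 * because a failed allocation at that point could not be recovered from
 * in this (atomic) context; earlier messages allocate, and an allocation
 * failure just drops the RPC for the client to resend.  The *_model names
 * are hypothetical; this is not driver code.
 */
#if 0	/* illustrative sketch, not built */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct req_model {
	int xid;
};

struct rqbd_model {
	struct req_model embedded;	/* mirrors rqbd_req */
};

static struct req_model *take_request(struct rqbd_model *rqbd,
				      bool buffer_retired)
{
	struct req_model *req;

	if (buffer_retired) {
		/* Last message in the buffer: the embedded object is free,
		 * so nothing can fail here.
		 */
		req = &rqbd->embedded;
		memset(req, 0, sizeof(*req));
		return req;
	}
	/* Otherwise allocate; the caller must tolerate NULL (drop the RPC). */
	return calloc(1, sizeof(*req));
}

int main(void)
{
	struct rqbd_model rqbd;
	struct req_model *a = take_request(&rqbd, false);	/* heap */
	struct req_model *b = take_request(&rqbd, true);	/* embedded */

	free(a);
	return b == &rqbd.embedded ? 0 : 1;
}
#endif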

/*
 *  Server's outgoing reply callback
 */
void reply_out_callback(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_reply_state *rs = cbid->cbid_arg;
	struct ptlrpc_service_part *svcpt = rs->rs_svcpt;

	LASSERT(ev->type == LNET_EVENT_SEND ||
		ev->type == LNET_EVENT_ACK ||
		ev->type == LNET_EVENT_UNLINK);

	if (!rs->rs_difficult) {
		/* 'Easy' replies have no further processing so I drop the
		 * net's ref on 'rs'
		 */
		LASSERT(ev->unlinked);
		ptlrpc_rs_decref(rs);
		return;
	}

	LASSERT(rs->rs_on_net);

	if (ev->unlinked) {
		/* Last network callback. The net's ref on 'rs' stays put
		 * until ptlrpc_handle_rs() is done with it
		 */
		spin_lock(&svcpt->scp_rep_lock);
		spin_lock(&rs->rs_lock);

		rs->rs_on_net = 0;
		if (!rs->rs_no_ack ||
		    rs->rs_transno <=
		    rs->rs_export->exp_obd->obd_last_committed ||
		    list_empty(&rs->rs_obd_list))
			ptlrpc_schedule_difficult_reply(rs);

		spin_unlock(&rs->rs_lock);
		spin_unlock(&svcpt->scp_rep_lock);
	}
}

static void ptlrpc_master_callback(struct lnet_event *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	void (*callback)(struct lnet_event *ev) = cbid->cbid_fn;

	/* Honestly, it's best to find out early. */
	LASSERT(cbid->cbid_arg != LP_POISON);
	LASSERT(callback == request_out_callback ||
		callback == reply_in_callback ||
		callback == client_bulk_callback ||
		callback == request_in_callback ||
		callback == reply_out_callback);

	callback(ev);
}
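
/*
 * Editor's note: a compiled-out sketch of the callback-id dispatch pattern
 * used by ptlrpc_master_callback() above.  Every MD's user pointer holds a
 * ptlrpc_cb_id carrying a handler and its argument, so a single master
 * callback on one event queue can fan events out to the per-type handlers
 * asserted above.  The miniature *_model types here are hypothetical;
 * this is not driver code.
 */
#if 0	/* illustrative sketch, not built */
#include <stdio.h>

struct ev_model {
	void *user_ptr;			/* mirrors ev->md.user_ptr */
};

struct cbid_model {
	void (*fn)(struct ev_model *);	/* mirrors cbid_fn */
	void *arg;			/* mirrors cbid_arg */
};

static void reply_handler(struct ev_model *ev)
{
	struct cbid_model *cbid = ev->user_ptr;

	printf("reply event for %p\n", cbid->arg);
}

/* The one callback registered with the event queue. */
static void master(struct ev_model *ev)
{
	struct cbid_model *cbid = ev->user_ptr;

	cbid->fn(ev);			/* dispatch on the stored handler */
}

int main(void)
{
	int req;			/* stand-in request */
	struct cbid_model cbid = { .fn = reply_handler, .arg = &req };
	struct ev_model ev = { .user_ptr = &cbid };

	master(&ev);
	return 0;
}
#endif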

int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
			struct lnet_process_id *peer, lnet_nid_t *self)
{
	int best_dist = 0;
	__u32 best_order = 0;
	int count = 0;
	int rc = -ENOENT;
	int dist;
	__u32 order;
	lnet_nid_t dst_nid;
	lnet_nid_t src_nid;

	peer->pid = LNET_PID_LUSTRE;

	/* Choose the matching UUID that's closest */
	while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
		dist = LNetDist(dst_nid, &src_nid, &order);
		if (dist < 0)
			continue;

		if (dist == 0) {		/* local! use loopback LND */
			peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
			rc = 0;
			break;
		}

		if (rc < 0 ||
		    dist < best_dist ||
		    (dist == best_dist && order < best_order)) {
			best_dist = dist;
			best_order = order;

			peer->nid = dst_nid;
			*self = src_nid;
			rc = 0;
		}
	}

	CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
	return rc;
}

static void ptlrpc_ni_fini(void)
{
	wait_queue_head_t waitq;
	struct l_wait_info lwi;
	int rc;
	int retries;

	/* Wait for the event queue to become idle since there may still be
	 * messages in flight with pending events (i.e. the fire-and-forget
	 * messages == client requests and "non-difficult" server
	 * replies)
	 */

	for (retries = 0;; retries++) {
		rc = LNetEQFree(ptlrpc_eq_h);
		switch (rc) {
		default:
			LBUG();

		case 0:
			LNetNIFini();
			return;

		case -EBUSY:
			if (retries != 0)
				CWARN("Event queue still busy\n");

			/* Wait for a bit */
			init_waitqueue_head(&waitq);
			lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
			l_wait_event(waitq, 0, &lwi);
			break;
		}
	}
	/* notreached */
}

static lnet_pid_t ptl_get_pid(void)
{
	lnet_pid_t pid;

	pid = LNET_PID_LUSTRE;
	return pid;
}

static int ptlrpc_ni_init(void)
{
	int rc;
	lnet_pid_t pid;

	pid = ptl_get_pid();
	CDEBUG(D_NET, "My pid is: %x\n", pid);

	/* We're not passing any limits yet... */
	rc = LNetNIInit(pid);
	if (rc < 0) {
		CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
		return rc;
	}

	/* CAVEAT EMPTOR: how we process portals events is _radically_
	 * different depending on...
	 */
	/* kernel LNet calls our master callback when there are new events;
	 * because we are guaranteed to get every event via callback,
	 * we just set the EQ size to 0 to avoid the overhead of serializing
	 * enqueue/dequeue operations in LNet.
	 */
	rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
	if (rc == 0)
		return 0;

	CERROR("Failed to allocate event queue: %d\n", rc);
	LNetNIFini();

	return rc;
}

int ptlrpc_init_portals(void)
{
	int rc = ptlrpc_ni_init();

	if (rc != 0) {
		CERROR("network initialisation failed\n");
		return rc;
	}
	rc = ptlrpcd_addref();
	if (rc == 0)
		return 0;

	CERROR("rpcd initialisation failed\n");
	ptlrpc_ni_fini();
	return rc;
}

void ptlrpc_exit_portals(void)
{
	ptlrpcd_decref();
	ptlrpc_ni_fini();
}