4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
35 #define DEBUG_SUBSYSTEM S_LNET
37 #include <linux/lnet/lib-lnet.h>
38 #include <uapi/linux/lnet/lnet-dlc.h>
/*
 * lnet_peer_tables_create - allocate one peer table per CPU partition (CPT).
 *
 * Each table gets an empty pt_deathrow list and a hash array of
 * LNET_PEER_HASH_SIZE bucket heads, allocated on the table's own CPT.
 * pt_hash is assigned last so a non-NULL pt_hash marks the table as
 * fully initialized (lnet_peer_tables_destroy() relies on this).
 *
 * NOTE(review): this excerpt omits the return type, the cfs_percpt_alloc()
 * size argument, and the error-path return statements -- confirm error
 * codes against the full source.
 */
41 lnet_peer_tables_create(void)
43 struct lnet_peer_table *ptable;
44 struct list_head *hash;
/* one struct lnet_peer_table per CPT */
48 the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
50 if (!the_lnet.ln_peer_tables) {
51 CERROR("Failed to allocate cpu-partition peer tables\n");
55 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
56 INIT_LIST_HEAD(&ptable->pt_deathrow);
/* allocate the hash buckets on CPT i for memory locality;
 * presumably followed by a !hash check (CERROR below) -- lines missing */
58 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
59 LNET_PEER_HASH_SIZE * sizeof(*hash));
61 CERROR("Failed to create peer hash table\n");
/* undo any tables already built before failing */
62 lnet_peer_tables_destroy();
66 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
67 INIT_LIST_HEAD(&hash[j]);
68 ptable->pt_hash = hash; /* sign of initialization */
/*
 * lnet_peer_tables_destroy - free every per-CPT peer table.
 *
 * Safe to call on a partially initialized state: returns early when the
 * percpt array was never allocated, and skips tables whose pt_hash is
 * still NULL (the "sign of initialization" set by lnet_peer_tables_create).
 * Asserts that deathrow and all hash buckets are empty -- callers must
 * have cleaned up all peers first (see lnet_peer_tables_cleanup()).
 */
75 lnet_peer_tables_destroy(void)
77 struct lnet_peer_table *ptable;
78 struct list_head *hash;
/* nothing ever allocated -- excerpt omits the early return here */
82 if (!the_lnet.ln_peer_tables)
85 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
86 hash = ptable->pt_hash;
87 if (!hash) /* not initialized */
90 LASSERT(list_empty(&ptable->pt_deathrow));
/* clear pt_hash before freeing so the table reads as uninitialized */
92 ptable->pt_hash = NULL;
93 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
94 LASSERT(list_empty(&hash[j]));
96 LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
99 cfs_percpt_free(the_lnet.ln_peer_tables);
100 the_lnet.ln_peer_tables = NULL;
/*
 * lnet_peer_table_cleanup_locked - unhash peers belonging to @ni.
 *
 * Walks every hash bucket of @ptable and removes matching peers,
 * dropping the hash table's reference on each.  A NULL @ni means
 * "match all peers" (full shutdown); otherwise peers attached to a
 * different NI are skipped.  pt_zombies counts peers whose final
 * destruction is still pending (see lnet_peer_table_deathrow_wait_locked).
 * Caller must hold the appropriate lnet_net_lock (hence _locked).
 */
104 lnet_peer_table_cleanup_locked(struct lnet_ni *ni,
105 struct lnet_peer_table *ptable)
108 struct lnet_peer *lp;
109 struct lnet_peer *tmp;
111 for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
112 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
/* skip peers on other NIs when cleaning a single NI;
 * excerpt omits the `continue` that presumably follows */
114 if (ni && ni != lp->lp_ni)
116 list_del_init(&lp->lp_hashlist);
117 /* Lose hash table's ref */
118 ptable->pt_zombies++;
119 lnet_peer_decref_locked(lp);
/*
 * lnet_peer_table_deathrow_wait_locked - wait until pt_zombies drains.
 *
 * Drops the net lock, sleeps half a second per iteration, and re-takes
 * the lock, looping until every zombie peer has been moved to deathrow
 * by lnet_destroy_peer_locked().  The is_power_of_2(i) test (starting
 * at i == 3) rate-limits the progress message to exponentially spaced
 * iterations.  Called with lnet_net_lock(@cpt_locked) held; returns
 * with it held.
 */
125 lnet_peer_table_deathrow_wait_locked(struct lnet_peer_table *ptable,
130 for (i = 3; ptable->pt_zombies; i++) {
131 lnet_net_unlock(cpt_locked);
133 if (is_power_of_2(i)) {
135 "Waiting for %d zombies on peer table\n",
/* sleep ~0.5s before rechecking */
138 set_current_state(TASK_UNINTERRUPTIBLE);
139 schedule_timeout(cfs_time_seconds(1) >> 1);
140 lnet_net_lock(cpt_locked);
/*
 * lnet_peer_table_del_rtrs_locked - delete routes through @ni's peers.
 *
 * For every peer in @ptable that is acting as a router gateway
 * (lp_rtr_refcount != 0), drop the net lock and delete all routes
 * (any network: LNET_NIDNET(LNET_NID_ANY)) that use that peer as
 * gateway, then re-take the lock.
 *
 * NOTE(review): the excerpt omits the lines that filter by @ni and
 * capture lp->lp_nid into lp_nid before unlocking -- confirm against
 * the full source that lp is not dereferenced after the unlock.
 */
145 lnet_peer_table_del_rtrs_locked(struct lnet_ni *ni,
146 struct lnet_peer_table *ptable,
149 struct lnet_peer *lp;
150 struct lnet_peer *tmp;
154 for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
155 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
/* not a gateway -- nothing to delete (excerpt omits the `continue`) */
160 if (!lp->lp_rtr_refcount)
/* lnet_del_route() takes its own locks, so release ours first */
165 lnet_net_unlock(cpt_locked);
166 lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lp_nid);
167 lnet_net_lock(cpt_locked);
/*
 * lnet_peer_tables_cleanup - remove all peers for @ni (or all peers).
 *
 * @ni == NULL is only legal during full LNet shutdown (asserted below).
 * Three passes over the per-CPT tables:
 *   1. delete any routes whose gateway is one of the doomed peers;
 *   2. unhash the peers (lnet_peer_table_cleanup_locked), which lets
 *      their refcounts drain to zero;
 *   3. wait for zombies to land on each table's deathrow, then splice
 *      every deathrow onto a local list.
 * Finally the collected peer structures are freed outside any lock.
 *
 * NOTE(review): the lnet_net_lock/unlock calls inside each pass are
 * missing from this excerpt -- the _locked helpers require them.
 */
173 lnet_peer_tables_cleanup(struct lnet_ni *ni)
175 struct lnet_peer_table *ptable;
176 struct list_head deathrow;
177 struct lnet_peer *lp;
178 struct lnet_peer *temp;
181 INIT_LIST_HEAD(&deathrow);
183 LASSERT(the_lnet.ln_shutdown || ni);
185 * If just deleting the peers for a NI, get rid of any routes these
186 * peers are gateways for.
188 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
190 lnet_peer_table_del_rtrs_locked(ni, ptable, i);
195 * Start the process of moving the applicable peers to
198 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
200 lnet_peer_table_cleanup_locked(ni, ptable);
204 /* Cleanup all entries on deathrow. */
205 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
207 lnet_peer_table_deathrow_wait_locked(ptable, i);
208 list_splice_init(&ptable->pt_deathrow, &deathrow);
/* free the dead peers with no lock held */
212 list_for_each_entry_safe(lp, temp, &deathrow, lp_hashlist) {
213 list_del(&lp->lp_hashlist);
214 LIBCFS_FREE(lp, sizeof(*lp));
/*
 * lnet_destroy_peer_locked - retire a peer whose refcount hit zero.
 *
 * Asserts the peer is fully quiescent (no refs, no router refs, empty
 * tx queue, unhashed, zero queued bytes), releases its NI reference,
 * and parks the structure on its table's pt_deathrow for reuse by
 * lnet_nid2peer_locked() or final freeing by lnet_peer_tables_cleanup().
 * Decrementing pt_zombies pairs with the increment in
 * lnet_peer_table_cleanup_locked().  Caller holds lnet_net_lock for
 * lp->lp_cpt.
 */
219 lnet_destroy_peer_locked(struct lnet_peer *lp)
221 struct lnet_peer_table *ptable;
223 LASSERT(!lp->lp_refcount);
224 LASSERT(!lp->lp_rtr_refcount);
225 LASSERT(list_empty(&lp->lp_txq));
226 LASSERT(list_empty(&lp->lp_hashlist));
227 LASSERT(!lp->lp_txqnob);
229 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
230 LASSERT(ptable->pt_number > 0);
/* drop the NI ref taken when the peer was created
 * (excerpt presumably omits lp->lp_ni = NULL and pt_number-- here) */
233 lnet_ni_decref_locked(lp->lp_ni, lp->lp_cpt);
236 list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
237 LASSERT(ptable->pt_zombies > 0);
238 ptable->pt_zombies--;
/*
 * lnet_find_peer_locked - look up @nid in @ptable's hash.
 *
 * Scans the bucket selected by lnet_nid2peerhash(nid); on a match,
 * takes a reference for the caller.  Must not be called during
 * shutdown (asserted).  Caller holds the net lock for this table's CPT.
 *
 * NOTE(review): the return statements (matched peer / NULL) are
 * omitted from this excerpt.
 */
242 lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
244 struct list_head *peers;
245 struct lnet_peer *lp;
247 LASSERT(!the_lnet.ln_shutdown);
249 peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
250 list_for_each_entry(lp, peers, lp_hashlist) {
251 if (lp->lp_nid == nid) {
/* found: hand the caller its own reference */
252 lnet_peer_addref_locked(lp);
/*
 * lnet_nid2peer_locked - find or create the peer for @nid.
 *
 * @lpp: out parameter receiving the referenced peer on success.
 * @cpt: lock owned by the caller; LNET_LOCK_EX means the exclusive
 *       lock is held and the real CPT must be derived from @nid.
 *
 * Fast path: return an existing hashed peer.  Slow path: recycle a
 * struct from pt_deathrow if available, otherwise drop the lock and
 * allocate a fresh one on @cpt2.  After re-taking the lock the code
 * must re-check for shutdown and for a peer concurrently inserted by
 * another thread (the lp2 lookup) before hashing the new peer.
 *
 * NOTE(review): this excerpt is heavily gapped -- the lock re-acquire,
 * the !lp allocation-failure branch, and several return paths are not
 * visible.  The comments below describe only what the visible lines do.
 */
261 lnet_nid2peer_locked(struct lnet_peer **lpp, lnet_nid_t nid, int cpt)
263 struct lnet_peer_table *ptable;
264 struct lnet_peer *lp = NULL;
265 struct lnet_peer *lp2;
270 if (the_lnet.ln_shutdown) /* it's shutting down */
273 /* cpt can be LNET_LOCK_EX if it's called from router functions */
274 cpt2 = cpt != LNET_LOCK_EX ? cpt : lnet_cpt_of_nid_locked(nid);
276 ptable = the_lnet.ln_peer_tables[cpt2];
277 lp = lnet_find_peer_locked(ptable, nid);
/* try to recycle a retired peer struct before allocating */
283 if (!list_empty(&ptable->pt_deathrow)) {
284 lp = list_entry(ptable->pt_deathrow.next,
285 struct lnet_peer, lp_hashlist);
286 list_del(&lp->lp_hashlist);
290 * take extra refcount in case another thread has shutdown LNet
291 * and destroyed locks and peer-table before I finish the allocation
294 lnet_net_unlock(cpt);
/* recycled struct: wipe stale state; otherwise allocate fresh on cpt2 */
297 memset(lp, 0, sizeof(*lp));
299 LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), cpt2, sizeof(*lp));
307 INIT_LIST_HEAD(&lp->lp_txq);
308 INIT_LIST_HEAD(&lp->lp_rtrq);
309 INIT_LIST_HEAD(&lp->lp_routes);
312 lp->lp_notifylnd = 0;
313 lp->lp_notifying = 0;
314 lp->lp_alive_count = 0;
315 lp->lp_timestamp = 0;
316 lp->lp_alive = !lnet_peers_start_down(); /* 1 bit!! */
317 lp->lp_last_alive = cfs_time_current(); /* assumes alive */
318 lp->lp_last_query = 0; /* haven't asked NI yet */
319 lp->lp_ping_timestamp = 0;
320 lp->lp_ping_feats = LNET_PING_FEAT_INVAL;
323 lp->lp_refcount = 2; /* 1 for caller; 1 for hash */
324 lp->lp_rtr_refcount = 0;
/* lock was dropped for allocation: re-check shutdown ... */
328 if (the_lnet.ln_shutdown) {
/* ... and whether another thread created this peer meanwhile */
333 lp2 = lnet_find_peer_locked(ptable, nid);
339 lp->lp_ni = lnet_net2ni_locked(LNET_NIDNET(nid), cpt2);
/* seed credits from the NI's configured per-peer limits */
345 lp->lp_txcredits = lp->lp_ni->ni_peertxcredits;
346 lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits;
347 lp->lp_rtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
348 lp->lp_minrtrcredits = lnet_peer_buffer_credits(lp->lp_ni);
350 list_add_tail(&lp->lp_hashlist,
351 &ptable->pt_hash[lnet_nid2peerhash(nid)]);
352 ptable->pt_version++;
/* failure path: return the unused struct to deathrow for reuse */
358 list_add(&lp->lp_hashlist, &ptable->pt_deathrow);
/*
 * lnet_debug_peer - log the state of the peer for @nid at D_WARNING.
 *
 * Looks up (creating if necessary, via lnet_nid2peer_locked) the peer,
 * prints its refcount, aliveness and credit counters, then drops the
 * reference taken by the lookup.  Aliveness reads "NA" unless the peer
 * is a router or peer-aliveness queries are enabled.
 *
 * NOTE(review): the lnet_net_lock(cpt) before the lookup and the early
 * return after "No peer" are not visible in this excerpt.
 */
364 lnet_debug_peer(lnet_nid_t nid)
366 char *aliveness = "NA";
367 struct lnet_peer *lp;
371 cpt = lnet_cpt_of_nid(nid);
374 rc = lnet_nid2peer_locked(&lp, nid, cpt);
376 lnet_net_unlock(cpt);
377 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
381 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
382 aliveness = lp->lp_alive ? "up" : "down";
384 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
385 libcfs_nid2str(lp->lp_nid), lp->lp_refcount,
386 aliveness, lp->lp_ni->ni_peertxcredits,
387 lp->lp_rtrcredits, lp->lp_minrtrcredits,
388 lp->lp_txcredits, lp->lp_mintxcredits, lp->lp_txqnob);
/* release the ref taken by lnet_nid2peer_locked() */
390 lnet_peer_decref_locked(lp);
392 lnet_net_unlock(cpt);
/*
 * lnet_get_peer_info - report the @peer_index-th peer of CPT *cpt_iter.
 *
 * Iterator-style ioctl helper: userspace walks peers by incrementing
 * @peer_index per call and bumping *cpt_iter when a CPT is exhausted.
 * Fills the out parameters (nid, aliveness string, refcount and the
 * various credit counters) for the selected peer under the per-CPT
 * net lock.  Returns 0 on success, -ENOENT when no peer matches.
 *
 * NOTE(review): line 444 copies lp_mintxcredits into
 * *peer_min_rtr_credits; given the parameter name, lp_minrtrcredits
 * looks intended -- this cannot be fixed safely from a gapped excerpt,
 * but flag it against the full source.
 */
396 lnet_get_peer_info(__u32 peer_index, __u64 *nid,
397 char aliveness[LNET_MAX_STR_LEN],
398 __u32 *cpt_iter, __u32 *refcount,
399 __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
400 __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
403 struct lnet_peer_table *peer_table;
404 struct lnet_peer *lp;
408 /* get the number of CPTs */
409 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
412 * if the cpt number to be examined is >= the number of cpts in
413 * the system then indicate that there are no more cpts to examin
415 if (*cpt_iter >= lncpt)
418 /* get the current table */
419 peer_table = the_lnet.ln_peer_tables[*cpt_iter];
420 /* if the ptable is NULL then there are no more cpts to examine */
424 lnet_net_lock(*cpt_iter);
426 for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
427 struct list_head *peers = &peer_table->pt_hash[j];
429 list_for_each_entry(lp, peers, lp_hashlist) {
/* skip peers until the requested index is reached */
430 if (peer_index-- > 0)
433 snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
434 if (lnet_isrouter(lp) ||
435 lnet_peer_aliveness_enabled(lp))
436 snprintf(aliveness, LNET_MAX_STR_LEN,
437 lp->lp_alive ? "up" : "down");
440 *refcount = lp->lp_refcount;
441 *ni_peer_tx_credits = lp->lp_ni->ni_peertxcredits;
442 *peer_tx_credits = lp->lp_txcredits;
443 *peer_rtr_credits = lp->lp_rtrcredits;
444 *peer_min_rtr_credits = lp->lp_mintxcredits;
445 *peer_tx_qnob = lp->lp_txqnob;
450 lnet_net_unlock(*cpt_iter);
454 return found ? 0 : -ENOENT;