/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_hw.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
40 /* 16 nano second time quantas to wait before making a Drift adjustment */
41 #define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT 0
42 /* Nano seconds to add/subtract when making a Drift adjustment */
43 #define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT 28
44 /* Add/subtract the Adjustment_Value when making a Drift adjustment */
45 #define QED_DRIFT_CNTR_DIRECTION_SHIFT 31
46 #define QED_TIMESTAMP_MASK BIT(16)
47 /* Param mask for Hardware to detect/timestamp the unicast PTP packets */
48 #define QED_PTP_UCAST_PARAM_MASK 0xF
50 static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
52 switch (MFW_PORT(p_hwfn)) {
54 return QED_RESC_LOCK_PTP_PORT0;
56 return QED_RESC_LOCK_PTP_PORT1;
58 return QED_RESC_LOCK_PTP_PORT2;
60 return QED_RESC_LOCK_PTP_PORT3;
62 return QED_RESC_LOCK_RESC_INVALID;
66 static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
68 struct qed_resc_lock_params params;
69 enum qed_resc_lock resource;
72 resource = qed_ptcdev_to_resc(p_hwfn);
73 if (resource == QED_RESC_LOCK_RESC_INVALID)
76 qed_mcp_resc_lock_default_init(¶ms, NULL, resource, true);
78 rc = qed_mcp_resc_lock(p_hwfn, p_ptt, ¶ms);
79 if (rc && rc != -EINVAL) {
81 } else if (rc == -EINVAL) {
82 /* MFW doesn't support resource locking, first PF on the port
85 if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine)
88 DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
90 } else if (!rc && !params.b_granted) {
91 DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
98 static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
100 struct qed_resc_unlock_params params;
101 enum qed_resc_lock resource;
104 resource = qed_ptcdev_to_resc(p_hwfn);
105 if (resource == QED_RESC_LOCK_RESC_INVALID)
108 qed_mcp_resc_lock_default_init(NULL, ¶ms, resource, true);
110 rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, ¶ms);
112 /* MFW doesn't support locking, first PF has lock ownership */
113 if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) {
116 DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
120 DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n");
126 /* Read Rx timestamp */
127 static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
129 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
130 struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
134 val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID);
135 if (!(val & QED_TIMESTAMP_MASK)) {
136 DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val);
140 val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB);
141 *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB);
145 /* Reset timestamp register to allow new timestamp */
146 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
152 /* Read Tx timestamp */
153 static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
155 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
156 struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
160 val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
161 if (!(val & QED_TIMESTAMP_MASK)) {
162 DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
163 "Invalid Tx timestamp, buf_seqid = %08x\n", val);
167 val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
168 *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
172 /* Reset timestamp register to allow new timestamp */
173 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);
178 /* Read Phy Hardware Clock */
179 static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
181 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
182 struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
185 temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB);
186 *phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB);
193 /* Filter PTP protocol packets that need to be timestamped */
194 static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
195 enum qed_ptp_filter_type rx_type,
196 enum qed_ptp_hwtstamp_tx_type tx_type)
198 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
199 struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
200 u32 rule_mask, enable_cfg = 0x0;
203 case QED_PTP_FILTER_NONE:
207 case QED_PTP_FILTER_ALL:
211 case QED_PTP_FILTER_V1_L4_EVENT:
215 case QED_PTP_FILTER_V1_L4_GEN:
219 case QED_PTP_FILTER_V2_L4_EVENT:
223 case QED_PTP_FILTER_V2_L4_GEN:
227 case QED_PTP_FILTER_V2_L2_EVENT:
231 case QED_PTP_FILTER_V2_L2_GEN:
235 case QED_PTP_FILTER_V2_EVENT:
239 case QED_PTP_FILTER_V2_GEN:
244 DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type);
248 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK,
249 QED_PTP_UCAST_PARAM_MASK);
250 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
251 qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);
253 if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) {
254 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
255 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
256 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
258 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
259 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK,
260 QED_PTP_UCAST_PARAM_MASK);
261 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
264 /* Reset possibly old timestamps */
265 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
271 /* Adjust the HW clock by a rate given in parts-per-billion (ppb) units.
272 * FW/HW accepts the adjustment value in terms of 3 parameters:
273 * Drift period - adjustment happens once in certain number of nano seconds.
274 * Drift value - time is adjusted by a certain value, for example by 5 ns.
275 * Drift direction - add or subtract the adjustment value.
276 * The routine translates ppb into the adjustment triplet in an optimal manner.
278 static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
280 s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
281 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
282 struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
283 u32 drift_ctr_cfg = 0, drift_state;
292 s64 best_dif = ppb, best_approx_dev = 1;
294 /* Adjustment value is up to +/-7ns, find an optimal value in
297 for (val = 7; val > 0; val--) {
298 period = div_s64(val * 1000000000, ppb);
303 if (period > 0xFFFFFFE)
306 /* Check both rounding ends for approximate error */
307 approx_dev = period * 16 + 8;
308 dif = ppb * approx_dev - val * 1000000000;
309 dif2 = dif + 16 * ppb;
316 /* Determine which end gives better approximation */
317 if (dif * (approx_dev + 16) > dif2 * approx_dev) {
323 /* Track best approximation found so far */
324 if (best_dif * approx_dev > dif * best_approx_dev) {
327 best_period = period;
328 best_approx_dev = approx_dev;
331 } else if (ppb == 1) {
332 /* This is a special case as its the only value which wouldn't
333 * fit in a s64 variable. In order to prevent castings simple
334 * handle it seperately.
337 best_period = 0xee6b27f;
340 best_period = 0xFFFFFFF;
343 drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
344 (((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
345 (((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT);
347 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1);
349 drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR);
350 if (drift_state & 1) {
351 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF,
354 DP_INFO(p_hwfn, "Drift counter is not reset\n");
358 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);
363 static int qed_ptp_hw_enable(struct qed_dev *cdev)
365 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
366 struct qed_ptt *p_ptt;
369 p_ptt = qed_ptt_acquire(p_hwfn);
371 DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n");
375 p_hwfn->p_ptp_ptt = p_ptt;
377 rc = qed_ptp_res_lock(p_hwfn, p_ptt);
380 "Couldn't acquire the resource lock, skip ptp enable for this PF\n");
381 qed_ptt_release(p_hwfn, p_ptt);
382 p_hwfn->p_ptp_ptt = NULL;
386 /* Reset PTP event detection rules - will be configured in the IOCTL */
387 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
388 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
389 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
390 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
392 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7);
393 qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7);
395 qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);
397 /* Pause free running counter */
398 if (QED_IS_BB_B0(p_hwfn->cdev))
399 qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
400 if (QED_IS_AH(p_hwfn->cdev))
401 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);
403 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
404 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
405 /* Resume free running counter */
406 if (QED_IS_BB_B0(p_hwfn->cdev))
407 qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
408 if (QED_IS_AH(p_hwfn->cdev)) {
409 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
410 qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
413 /* Disable drift register */
414 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
415 qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);
417 /* Reset possibly old timestamps */
418 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
420 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);
425 static int qed_ptp_hw_disable(struct qed_dev *cdev)
427 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
428 struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
430 qed_ptp_res_unlock(p_hwfn, p_ptt);
432 /* Reset PTP event detection rules */
433 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
434 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
436 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
437 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
439 /* Disable the PTP feature */
440 qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
441 qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
443 qed_ptt_release(p_hwfn, p_ptt);
444 p_hwfn->p_ptp_ptt = NULL;
449 const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
450 .cfg_filters = qed_ptp_hw_cfg_filters,
451 .read_rx_ts = qed_ptp_hw_read_rx_ts,
452 .read_tx_ts = qed_ptp_hw_read_tx_ts,
453 .read_cc = qed_ptp_hw_read_cc,
454 .adjfreq = qed_ptp_hw_adjfreq,
455 .disable = qed_ptp_hw_disable,
456 .enable = qed_ptp_hw_enable,