/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/clocksource.h>
34 #include <linux/highmem.h>
35 #include <rdma/mlx5-abi.h>
41 MLX5_CYCLES_SHIFT = 23
45 MLX5_PIN_MODE_IN = 0x0,
46 MLX5_PIN_MODE_OUT = 0x1,
50 MLX5_OUT_PATTERN_PULSE = 0x0,
51 MLX5_OUT_PATTERN_PERIODIC = 0x1,
55 MLX5_EVENT_MODE_DISABLE = 0x0,
56 MLX5_EVENT_MODE_REPETETIVE = 0x1,
57 MLX5_EVENT_MODE_ONCE_TILL_ARM = 0x2,
61 MLX5_MTPPS_FS_ENABLE = BIT(0x0),
62 MLX5_MTPPS_FS_PATTERN = BIT(0x2),
63 MLX5_MTPPS_FS_PIN_MODE = BIT(0x3),
64 MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
65 MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
66 MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
69 static u64 read_internal_timer(const struct cyclecounter *cc)
71 struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
72 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
75 return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
78 static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
80 struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
81 struct mlx5_clock *clock = &mdev->clock;
87 sign = smp_load_acquire(&clock_info->sign);
88 smp_store_mb(clock_info->sign,
89 sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
91 clock_info->cycles = clock->tc.cycle_last;
92 clock_info->mult = clock->cycles.mult;
93 clock_info->nsec = clock->tc.nsec;
94 clock_info->frac = clock->tc.frac;
96 smp_store_release(&clock_info->sign,
97 sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
100 static void mlx5_pps_out(struct work_struct *work)
102 struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
104 struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
106 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
108 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
112 for (i = 0; i < clock->ptp_info.n_pins; i++) {
115 write_seqlock_irqsave(&clock->lock, flags);
116 tstart = clock->pps_info.start[i];
117 clock->pps_info.start[i] = 0;
118 write_sequnlock_irqrestore(&clock->lock, flags);
122 MLX5_SET(mtpps_reg, in, pin, i);
123 MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
124 MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
125 mlx5_set_mtpps(mdev, in, sizeof(in));
129 static void mlx5_timestamp_overflow(struct work_struct *work)
131 struct delayed_work *dwork = to_delayed_work(work);
132 struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
136 write_seqlock_irqsave(&clock->lock, flags);
137 timecounter_read(&clock->tc);
138 mlx5_update_clock_info_page(clock->mdev);
139 write_sequnlock_irqrestore(&clock->lock, flags);
140 schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
143 static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
144 const struct timespec64 *ts)
146 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
148 u64 ns = timespec64_to_ns(ts);
151 write_seqlock_irqsave(&clock->lock, flags);
152 timecounter_init(&clock->tc, &clock->cycles, ns);
153 mlx5_update_clock_info_page(clock->mdev);
154 write_sequnlock_irqrestore(&clock->lock, flags);
159 static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
160 struct ptp_system_timestamp *sts)
162 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
164 struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
169 write_seqlock_irqsave(&clock->lock, flags);
170 cycles = mlx5_read_internal_timer(mdev, sts);
171 ns = timecounter_cyc2time(&clock->tc, cycles);
172 write_sequnlock_irqrestore(&clock->lock, flags);
174 *ts = ns_to_timespec64(ns);
179 static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
181 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
185 write_seqlock_irqsave(&clock->lock, flags);
186 timecounter_adjtime(&clock->tc, delta);
187 mlx5_update_clock_info_page(clock->mdev);
188 write_sequnlock_irqrestore(&clock->lock, flags);
193 static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
199 struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
207 adj = clock->nominal_c_mult;
209 diff = div_u64(adj, 1000000000ULL);
211 write_seqlock_irqsave(&clock->lock, flags);
212 timecounter_read(&clock->tc);
213 clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
214 clock->nominal_c_mult + diff;
215 mlx5_update_clock_info_page(clock->mdev);
216 write_sequnlock_irqrestore(&clock->lock, flags);
221 static int mlx5_extts_configure(struct ptp_clock_info *ptp,
222 struct ptp_clock_request *rq,
225 struct mlx5_clock *clock =
226 container_of(ptp, struct mlx5_clock, ptp_info);
227 struct mlx5_core_dev *mdev =
228 container_of(clock, struct mlx5_core_dev, clock);
229 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
230 u32 field_select = 0;
236 if (!MLX5_PPS_CAP(mdev))
239 if (rq->extts.index >= clock->ptp_info.n_pins)
243 pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
246 pin_mode = MLX5_PIN_MODE_IN;
247 pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
248 field_select = MLX5_MTPPS_FS_PIN_MODE |
249 MLX5_MTPPS_FS_PATTERN |
250 MLX5_MTPPS_FS_ENABLE;
252 pin = rq->extts.index;
253 field_select = MLX5_MTPPS_FS_ENABLE;
256 MLX5_SET(mtpps_reg, in, pin, pin);
257 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
258 MLX5_SET(mtpps_reg, in, pattern, pattern);
259 MLX5_SET(mtpps_reg, in, enable, on);
260 MLX5_SET(mtpps_reg, in, field_select, field_select);
262 err = mlx5_set_mtpps(mdev, in, sizeof(in));
266 return mlx5_set_mtppse(mdev, pin, 0,
267 MLX5_EVENT_MODE_REPETETIVE & on);
270 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
271 struct ptp_clock_request *rq,
274 struct mlx5_clock *clock =
275 container_of(ptp, struct mlx5_clock, ptp_info);
276 struct mlx5_core_dev *mdev =
277 container_of(clock, struct mlx5_core_dev, clock);
278 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
279 u64 nsec_now, nsec_delta, time_stamp = 0;
280 u64 cycles_now, cycles_delta;
281 struct timespec64 ts;
283 u32 field_select = 0;
290 if (!MLX5_PPS_CAP(mdev))
293 if (rq->perout.index >= clock->ptp_info.n_pins)
297 pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
302 pin_mode = MLX5_PIN_MODE_OUT;
303 pattern = MLX5_OUT_PATTERN_PERIODIC;
304 ts.tv_sec = rq->perout.period.sec;
305 ts.tv_nsec = rq->perout.period.nsec;
306 ns = timespec64_to_ns(&ts);
308 if ((ns >> 1) != 500000000LL)
311 ts.tv_sec = rq->perout.start.sec;
312 ts.tv_nsec = rq->perout.start.nsec;
313 ns = timespec64_to_ns(&ts);
314 cycles_now = mlx5_read_internal_timer(mdev, NULL);
315 write_seqlock_irqsave(&clock->lock, flags);
316 nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
317 nsec_delta = ns - nsec_now;
318 cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
320 write_sequnlock_irqrestore(&clock->lock, flags);
321 time_stamp = cycles_now + cycles_delta;
322 field_select = MLX5_MTPPS_FS_PIN_MODE |
323 MLX5_MTPPS_FS_PATTERN |
324 MLX5_MTPPS_FS_ENABLE |
325 MLX5_MTPPS_FS_TIME_STAMP;
327 pin = rq->perout.index;
328 field_select = MLX5_MTPPS_FS_ENABLE;
331 MLX5_SET(mtpps_reg, in, pin, pin);
332 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
333 MLX5_SET(mtpps_reg, in, pattern, pattern);
334 MLX5_SET(mtpps_reg, in, enable, on);
335 MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
336 MLX5_SET(mtpps_reg, in, field_select, field_select);
338 err = mlx5_set_mtpps(mdev, in, sizeof(in));
342 return mlx5_set_mtppse(mdev, pin, 0,
343 MLX5_EVENT_MODE_REPETETIVE & on);
346 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
347 struct ptp_clock_request *rq,
350 struct mlx5_clock *clock =
351 container_of(ptp, struct mlx5_clock, ptp_info);
353 clock->pps_info.enabled = !!on;
357 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
358 struct ptp_clock_request *rq,
362 case PTP_CLK_REQ_EXTTS:
363 return mlx5_extts_configure(ptp, rq, on);
364 case PTP_CLK_REQ_PEROUT:
365 return mlx5_perout_configure(ptp, rq, on);
366 case PTP_CLK_REQ_PPS:
367 return mlx5_pps_configure(ptp, rq, on);
374 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
375 enum ptp_pin_function func, unsigned int chan)
377 return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
380 static const struct ptp_clock_info mlx5_ptp_clock_info = {
381 .owner = THIS_MODULE,
383 .max_adj = 100000000,
389 .adjfreq = mlx5_ptp_adjfreq,
390 .adjtime = mlx5_ptp_adjtime,
391 .gettimex64 = mlx5_ptp_gettimex,
392 .settime64 = mlx5_ptp_settime,
397 static int mlx5_init_pin_config(struct mlx5_clock *clock)
401 clock->ptp_info.pin_config =
402 kcalloc(clock->ptp_info.n_pins,
403 sizeof(*clock->ptp_info.pin_config),
405 if (!clock->ptp_info.pin_config)
407 clock->ptp_info.enable = mlx5_ptp_enable;
408 clock->ptp_info.verify = mlx5_ptp_verify;
409 clock->ptp_info.pps = 1;
411 for (i = 0; i < clock->ptp_info.n_pins; i++) {
412 snprintf(clock->ptp_info.pin_config[i].name,
413 sizeof(clock->ptp_info.pin_config[i].name),
415 clock->ptp_info.pin_config[i].index = i;
416 clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
417 clock->ptp_info.pin_config[i].chan = i;
423 static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
425 struct mlx5_clock *clock = &mdev->clock;
426 u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
428 mlx5_query_mtpps(mdev, out, sizeof(out));
430 clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
431 cap_number_of_pps_pins);
432 clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
433 cap_max_num_of_pps_in_pins);
434 clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
435 cap_max_num_of_pps_out_pins);
437 clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
438 clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
439 clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
440 clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
441 clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
442 clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
443 clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
444 clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
447 static int mlx5_pps_event(struct notifier_block *nb,
448 unsigned long type, void *data)
450 struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
451 struct mlx5_core_dev *mdev = clock->mdev;
452 struct ptp_clock_event ptp_event;
453 u64 cycles_now, cycles_delta;
454 u64 nsec_now, nsec_delta, ns;
455 struct mlx5_eqe *eqe = data;
456 int pin = eqe->data.pps.pin;
457 struct timespec64 ts;
460 switch (clock->ptp_info.pin_config[pin].func) {
462 ptp_event.index = pin;
463 ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
464 be64_to_cpu(eqe->data.pps.time_stamp));
465 if (clock->pps_info.enabled) {
466 ptp_event.type = PTP_CLOCK_PPSUSR;
467 ptp_event.pps_times.ts_real =
468 ns_to_timespec64(ptp_event.timestamp);
470 ptp_event.type = PTP_CLOCK_EXTTS;
472 /* TODOL clock->ptp can be NULL if ptp_clock_register failes */
473 ptp_clock_event(clock->ptp, &ptp_event);
476 mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
477 cycles_now = mlx5_read_internal_timer(mdev, NULL);
480 ns = timespec64_to_ns(&ts);
481 write_seqlock_irqsave(&clock->lock, flags);
482 nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
483 nsec_delta = ns - nsec_now;
484 cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
486 clock->pps_info.start[pin] = cycles_now + cycles_delta;
487 schedule_work(&clock->pps_info.out_work);
488 write_sequnlock_irqrestore(&clock->lock, flags);
491 mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
492 clock->ptp_info.pin_config[pin].func);
498 void mlx5_init_clock(struct mlx5_core_dev *mdev)
500 struct mlx5_clock *clock = &mdev->clock;
506 dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
508 mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
511 seqlock_init(&clock->lock);
512 clock->cycles.read = read_internal_timer;
513 clock->cycles.shift = MLX5_CYCLES_SHIFT;
514 clock->cycles.mult = clocksource_khz2mult(dev_freq,
515 clock->cycles.shift);
516 clock->nominal_c_mult = clock->cycles.mult;
517 clock->cycles.mask = CLOCKSOURCE_MASK(41);
520 timecounter_init(&clock->tc, &clock->cycles,
521 ktime_to_ns(ktime_get_real()));
523 /* Calculate period in seconds to call the overflow watchdog - to make
524 * sure counter is checked at least twice every wrap around.
525 * The period is calculated as the minimum between max HW cycles count
526 * (The clock source mask) and max amount of cycles that can be
527 * multiplied by clock multiplier where the result doesn't exceed
530 overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
531 overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
533 ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
535 do_div(ns, NSEC_PER_SEC / HZ);
536 clock->overflow_period = ns;
539 (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
540 if (mdev->clock_info) {
541 mdev->clock_info->nsec = clock->tc.nsec;
542 mdev->clock_info->cycles = clock->tc.cycle_last;
543 mdev->clock_info->mask = clock->cycles.mask;
544 mdev->clock_info->mult = clock->nominal_c_mult;
545 mdev->clock_info->shift = clock->cycles.shift;
546 mdev->clock_info->frac = clock->tc.frac;
547 mdev->clock_info->overflow_period = clock->overflow_period;
550 INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
551 INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
552 if (clock->overflow_period)
553 schedule_delayed_work(&clock->overflow_work, 0);
555 mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
557 /* Configure the PHC */
558 clock->ptp_info = mlx5_ptp_clock_info;
560 /* Initialize 1PPS data structures */
561 if (MLX5_PPS_CAP(mdev))
562 mlx5_get_pps_caps(mdev);
563 if (clock->ptp_info.n_pins)
564 mlx5_init_pin_config(clock);
566 clock->ptp = ptp_clock_register(&clock->ptp_info,
568 if (IS_ERR(clock->ptp)) {
569 mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
570 PTR_ERR(clock->ptp));
574 MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
575 mlx5_eq_notifier_register(mdev, &clock->pps_nb);
578 void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
580 struct mlx5_clock *clock = &mdev->clock;
582 if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
585 mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
587 ptp_clock_unregister(clock->ptp);
591 cancel_work_sync(&clock->pps_info.out_work);
592 cancel_delayed_work_sync(&clock->overflow_work);
594 if (mdev->clock_info) {
595 free_page((unsigned long)mdev->clock_info);
596 mdev->clock_info = NULL;
599 kfree(clock->ptp_info.pin_config);