// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2019 Xilinx, Inc.
 * Siva Durga Prasad Paladugu <siva.durga.paladugu@xilinx.com>
 */
#include <common.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <clk.h>
#include <clk-uclass.h>
#include <zynqmp_firmware.h>
#include <asm/arch/sys_proto.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/err.h>
/* Limits for the locally cached clock database */
#define MAX_PARENT			100
#define MAX_NODES			6	/* restored: used by struct versal_clock node[] */
#define MAX_NAME_LEN			50

#define CLK_TYPE_SHIFT			2

#define PM_API_PAYLOAD_LEN		3

/* Parent-list sentinels returned by the firmware */
#define NA_PARENT			0xFFFFFFFF
#define DUMMY_PARENT			0xFFFFFFFE

#define CLK_TYPE_FIELD_LEN		4
#define CLK_TOPOLOGY_NODE_OFFSET	16
#define NODES_PER_RESP			3

/* Layout of one packed topology-node word */
#define CLK_TYPE_FIELD_MASK		0xF
#define CLK_FLAG_FIELD_MASK		GENMASK(21, 8)
#define CLK_TYPE_FLAG_FIELD_MASK	GENMASK(31, 24)
#define CLK_TYPE_FLAG2_FIELD_MASK	GENMASK(7, 4)
#define CLK_TYPE_FLAG_BITS		8

#define CLK_PARENTS_ID_LEN		16
#define CLK_PARENTS_ID_MASK		0xFFFF

/* Terminators used while walking firmware responses */
#define END_OF_TOPOLOGY_NODE		1
#define END_OF_PARENTS			1

#define CLK_VALID_MASK			0x1

/* Bit positions of the node-id fields inside a clock id */
#define NODE_CLASS_SHIFT		26U
#define NODE_SUBCLASS_SHIFT		20U
#define NODE_TYPE_SHIFT			14U
#define NODE_INDEX_SHIFT		0U

/* Response sizes (words/bytes) for the PM query calls */
#define CLK_GET_NAME_RESP_LEN		16
#define CLK_GET_TOPOLOGY_RESP_WORDS	3
#define CLK_GET_PARENTS_RESP_WORDS	3
#define CLK_GET_ATTR_RESP_WORDS		1

#define NODE_SUBCLASS_CLOCK_PLL		1
#define NODE_SUBCLASS_CLOCK_OUT		2
#define NODE_SUBCLASS_CLOCK_REF		3

#define NODE_CLASS_CLOCK		2
#define NODE_CLASS_MASK			0x3F

/* Topology node types */
#define CLOCK_NODE_TYPE_MUX		1
#define CLOCK_NODE_TYPE_DIV		4
#define CLOCK_NODE_TYPE_GATE		6
/*
 * Query identifiers passed in PM_QUERY_DATA calls.
 * Values form the firmware ABI — do not reorder.
 */
enum pm_query_id {
	PM_QID_INVALID,
	PM_QID_CLOCK_GET_NAME,
	PM_QID_CLOCK_GET_TOPOLOGY,
	PM_QID_CLOCK_GET_FIXEDFACTOR_PARAMS,
	PM_QID_CLOCK_GET_PARENTS,
	PM_QID_CLOCK_GET_ATTRIBUTES,
	PM_QID_PINCTRL_GET_NUM_PINS,
	PM_QID_PINCTRL_GET_NUM_FUNCTIONS,
	PM_QID_PINCTRL_GET_NUM_FUNCTION_GROUPS,
	PM_QID_PINCTRL_GET_FUNCTION_NAME,
	PM_QID_PINCTRL_GET_FUNCTION_GROUPS,
	PM_QID_PINCTRL_GET_PIN_GROUPS,
	PM_QID_CLOCK_GET_NUM_CLOCKS,
	PM_QID_CLOCK_GET_MAX_DIVISOR,
};
89 char name[MAX_NAME_LEN];
94 struct clock_topology {
100 struct versal_clock {
101 char clk_name[MAX_NAME_LEN];
104 struct clock_topology node[MAX_NODES];
106 struct clock_parent parent[MAX_PARENT];
/* Per-device private data: points at the shared clock table */
struct versal_clk_priv {
	struct versal_clock *clk;
};
115 static ulong alt_ref_clk;
116 static ulong pl_alt_ref_clk;
117 static ulong ref_clk;
119 struct versal_pm_query_data {
126 static struct versal_clock *clock;
127 static unsigned int clock_max_idx;
129 #define PM_QUERY_DATA 35
131 static int versal_pm_query(struct versal_pm_query_data qdata, u32 *ret_payload)
135 regs.regs[0] = PM_SIP_SVC | PM_QUERY_DATA;
136 regs.regs[1] = ((u64)qdata.arg1 << 32) | qdata.qid;
137 regs.regs[2] = ((u64)qdata.arg3 << 32) | qdata.arg2;
142 ret_payload[0] = (u32)regs.regs[0];
143 ret_payload[1] = upper_32_bits(regs.regs[0]);
144 ret_payload[2] = (u32)regs.regs[1];
145 ret_payload[3] = upper_32_bits(regs.regs[1]);
146 ret_payload[4] = (u32)regs.regs[2];
149 return qdata.qid == PM_QID_CLOCK_GET_NAME ? 0 : regs.regs[0];
152 static inline int versal_is_valid_clock(u32 clk_id)
154 if (clk_id >= clock_max_idx)
157 return clock[clk_id].valid;
160 static int versal_get_clock_name(u32 clk_id, char *clk_name)
164 ret = versal_is_valid_clock(clk_id);
166 strncpy(clk_name, clock[clk_id].clk_name, MAX_NAME_LEN);
170 return ret == 0 ? -EINVAL : ret;
173 static int versal_get_clock_type(u32 clk_id, u32 *type)
177 ret = versal_is_valid_clock(clk_id);
179 *type = clock[clk_id].type;
183 return ret == 0 ? -EINVAL : ret;
186 static int versal_pm_clock_get_num_clocks(u32 *nclocks)
188 struct versal_pm_query_data qdata = {0};
189 u32 ret_payload[PAYLOAD_ARG_CNT];
192 qdata.qid = PM_QID_CLOCK_GET_NUM_CLOCKS;
194 ret = versal_pm_query(qdata, ret_payload);
195 *nclocks = ret_payload[1];
200 static int versal_pm_clock_get_name(u32 clock_id, char *name)
202 struct versal_pm_query_data qdata = {0};
203 u32 ret_payload[PAYLOAD_ARG_CNT];
206 qdata.qid = PM_QID_CLOCK_GET_NAME;
207 qdata.arg1 = clock_id;
209 ret = versal_pm_query(qdata, ret_payload);
212 memcpy(name, ret_payload, CLK_GET_NAME_RESP_LEN);
217 static int versal_pm_clock_get_topology(u32 clock_id, u32 index, u32 *topology)
219 struct versal_pm_query_data qdata = {0};
220 u32 ret_payload[PAYLOAD_ARG_CNT];
223 qdata.qid = PM_QID_CLOCK_GET_TOPOLOGY;
224 qdata.arg1 = clock_id;
227 ret = versal_pm_query(qdata, ret_payload);
228 memcpy(topology, &ret_payload[1], CLK_GET_TOPOLOGY_RESP_WORDS * 4);
233 static int versal_pm_clock_get_parents(u32 clock_id, u32 index, u32 *parents)
235 struct versal_pm_query_data qdata = {0};
236 u32 ret_payload[PAYLOAD_ARG_CNT];
239 qdata.qid = PM_QID_CLOCK_GET_PARENTS;
240 qdata.arg1 = clock_id;
243 ret = versal_pm_query(qdata, ret_payload);
244 memcpy(parents, &ret_payload[1], CLK_GET_PARENTS_RESP_WORDS * 4);
249 static int versal_pm_clock_get_attributes(u32 clock_id, u32 *attr)
251 struct versal_pm_query_data qdata = {0};
252 u32 ret_payload[PAYLOAD_ARG_CNT];
255 qdata.qid = PM_QID_CLOCK_GET_ATTRIBUTES;
256 qdata.arg1 = clock_id;
258 ret = versal_pm_query(qdata, ret_payload);
259 memcpy(attr, &ret_payload[1], CLK_GET_ATTR_RESP_WORDS * 4);
264 static int __versal_clock_get_topology(struct clock_topology *topology,
265 u32 *data, u32 *nnodes)
269 for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
270 if (!(data[i] & CLK_TYPE_FIELD_MASK))
271 return END_OF_TOPOLOGY_NODE;
272 topology[*nnodes].type = data[i] & CLK_TYPE_FIELD_MASK;
273 topology[*nnodes].flag = FIELD_GET(CLK_FLAG_FIELD_MASK,
275 topology[*nnodes].type_flag =
276 FIELD_GET(CLK_TYPE_FLAG_FIELD_MASK, data[i]);
277 topology[*nnodes].type_flag |=
278 FIELD_GET(CLK_TYPE_FLAG2_FIELD_MASK, data[i]) <<
280 debug("topology type:0x%x, flag:0x%x, type_flag:0x%x\n",
281 topology[*nnodes].type, topology[*nnodes].flag,
282 topology[*nnodes].type_flag);
289 static int versal_clock_get_topology(u32 clk_id,
290 struct clock_topology *topology,
294 u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};
297 for (j = 0; j <= MAX_NODES; j += 3) {
298 ret = versal_pm_clock_get_topology(clock[clk_id].clk_id, j,
302 ret = __versal_clock_get_topology(topology, pm_resp, num_nodes);
303 if (ret == END_OF_TOPOLOGY_NODE)
310 static int __versal_clock_get_parents(struct clock_parent *parents, u32 *data,
314 struct clock_parent *parent;
316 for (i = 0; i < PM_API_PAYLOAD_LEN; i++) {
317 if (data[i] == NA_PARENT)
318 return END_OF_PARENTS;
320 parent = &parents[i];
321 parent->id = data[i] & CLK_PARENTS_ID_MASK;
322 if (data[i] == DUMMY_PARENT) {
323 strcpy(parent->name, "dummy_name");
326 parent->flag = data[i] >> CLK_PARENTS_ID_LEN;
327 if (versal_get_clock_name(parent->id, parent->name))
330 debug("parent name:%s\n", parent->name);
337 static int versal_clock_get_parents(u32 clk_id, struct clock_parent *parents,
341 u32 pm_resp[PM_API_PAYLOAD_LEN] = {0};
345 /* Get parents from firmware */
346 ret = versal_pm_clock_get_parents(clock[clk_id].clk_id, j,
351 ret = __versal_clock_get_parents(&parents[j], pm_resp,
353 if (ret == END_OF_PARENTS)
355 j += PM_API_PAYLOAD_LEN;
356 } while (*num_parents <= MAX_PARENT);
361 static u32 versal_clock_get_div(u32 clk_id)
363 u32 ret_payload[PAYLOAD_ARG_CNT];
366 xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
367 div = ret_payload[1];
372 static u32 versal_clock_set_div(u32 clk_id, u32 div)
374 u32 ret_payload[PAYLOAD_ARG_CNT];
376 xilinx_pm_request(PM_CLOCK_SETDIVIDER, clk_id, div, 0, 0, ret_payload);
381 static u64 versal_clock_ref(u32 clk_id)
383 u32 ret_payload[PAYLOAD_ARG_CNT];
386 xilinx_pm_request(PM_CLOCK_GETPARENT, clk_id, 0, 0, 0, ret_payload);
387 ref = ret_payload[0];
391 return pl_alt_ref_clk;
395 static u64 versal_clock_get_pll_rate(u32 clk_id)
397 u32 ret_payload[PAYLOAD_ARG_CNT];
402 u32 parent_rate, parent_id;
403 u32 id = clk_id & 0xFFF;
405 xilinx_pm_request(PM_CLOCK_GETSTATE, clk_id, 0, 0, 0, ret_payload);
406 res = ret_payload[1];
408 printf("0%x PLL not enabled\n", clk_id);
412 parent_id = clock[clock[id].parent[0].id].clk_id;
413 parent_rate = versal_clock_ref(parent_id);
415 xilinx_pm_request(PM_CLOCK_GETDIVIDER, clk_id, 0, 0, 0, ret_payload);
416 fbdiv = ret_payload[1];
417 xilinx_pm_request(PM_CLOCK_PLL_GETPARAM, clk_id, 2, 0, 0, ret_payload);
418 frac = ret_payload[1];
420 freq = (fbdiv * parent_rate) >> (1 << frac);
425 static u32 versal_clock_mux(u32 clk_id)
428 u32 id = clk_id & 0xFFF;
430 for (i = 0; i < clock[id].num_nodes; i++)
431 if (clock[id].node[i].type == CLOCK_NODE_TYPE_MUX)
437 static u32 versal_clock_get_parentid(u32 clk_id)
440 u32 ret_payload[PAYLOAD_ARG_CNT];
441 u32 id = clk_id & 0xFFF;
443 if (versal_clock_mux(clk_id)) {
444 xilinx_pm_request(PM_CLOCK_GETPARENT, clk_id, 0, 0, 0,
446 parent_id = ret_payload[1];
449 debug("parent_id:0x%x\n", clock[clock[id].parent[parent_id].id].clk_id);
450 return clock[clock[id].parent[parent_id].id].clk_id;
453 static u32 versal_clock_gate(u32 clk_id)
455 u32 id = clk_id & 0xFFF;
458 for (i = 0; i < clock[id].num_nodes; i++)
459 if (clock[id].node[i].type == CLOCK_NODE_TYPE_GATE)
465 static u32 versal_clock_div(u32 clk_id)
468 u32 id = clk_id & 0xFFF;
470 for (i = 0; i < clock[id].num_nodes; i++)
471 if (clock[id].node[i].type == CLOCK_NODE_TYPE_DIV)
477 static u32 versal_clock_pll(u32 clk_id, u64 *clk_rate)
479 if (((clk_id >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK) ==
480 NODE_SUBCLASS_CLOCK_PLL &&
481 ((clk_id >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK) ==
483 *clk_rate = versal_clock_get_pll_rate(clk_id);
490 static u64 versal_clock_calc(u32 clk_id)
496 if (versal_clock_pll(clk_id, &clk_rate))
499 parent_id = versal_clock_get_parentid(clk_id);
500 if (((parent_id >> NODE_SUBCLASS_SHIFT) &
501 NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF)
502 return versal_clock_ref(clk_id);
504 clk_rate = versal_clock_calc(parent_id);
506 if (versal_clock_div(clk_id)) {
507 div = versal_clock_get_div(clk_id);
508 clk_rate = DIV_ROUND_CLOSEST(clk_rate, div);
514 static int versal_clock_get_rate(u32 clk_id, u64 *clk_rate)
516 if (((clk_id >> NODE_SUBCLASS_SHIFT) &
517 NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_REF)
518 *clk_rate = versal_clock_ref(clk_id);
520 if (versal_clock_pll(clk_id, clk_rate))
523 if (((clk_id >> NODE_SUBCLASS_SHIFT) &
524 NODE_CLASS_MASK) == NODE_SUBCLASS_CLOCK_OUT &&
525 ((clk_id >> NODE_CLASS_SHIFT) &
526 NODE_CLASS_MASK) == NODE_CLASS_CLOCK) {
527 if (!versal_clock_gate(clk_id))
529 *clk_rate = versal_clock_calc(clk_id);
536 int soc_clk_dump(void)
539 u32 type, ret, i = 0;
541 printf("\n ****** VERSAL CLOCKS *****\n");
543 printf("alt_ref_clk:%ld pl_alt_ref_clk:%ld ref_clk:%ld\n",
544 alt_ref_clk, pl_alt_ref_clk, ref_clk);
545 for (i = 0; i < clock_max_idx; i++) {
546 debug("%s\n", clock[i].clk_name);
547 ret = versal_get_clock_type(i, &type);
548 if (ret || type != CLK_TYPE_OUTPUT)
551 ret = versal_clock_get_rate(clock[i].clk_id, &clk_rate);
554 printf("clk: %s freq:%lld\n",
555 clock[i].clk_name, clk_rate);
561 static void versal_get_clock_info(void)
564 u32 attr, type = 0, nodetype, subclass, class;
566 for (i = 0; i < clock_max_idx; i++) {
567 ret = versal_pm_clock_get_attributes(i, &attr);
571 clock[i].valid = attr & CLK_VALID_MASK;
572 clock[i].type = ((attr >> CLK_TYPE_SHIFT) & 0x1) ?
573 CLK_TYPE_EXTERNAL : CLK_TYPE_OUTPUT;
574 nodetype = (attr >> NODE_TYPE_SHIFT) & NODE_CLASS_MASK;
575 subclass = (attr >> NODE_SUBCLASS_SHIFT) & NODE_CLASS_MASK;
576 class = (attr >> NODE_CLASS_SHIFT) & NODE_CLASS_MASK;
578 clock[i].clk_id = (class << NODE_CLASS_SHIFT) |
579 (subclass << NODE_SUBCLASS_SHIFT) |
580 (nodetype << NODE_TYPE_SHIFT) |
581 (i << NODE_INDEX_SHIFT);
583 ret = versal_pm_clock_get_name(clock[i].clk_id,
587 debug("clk name:%s, Valid:%d, type:%d, clk_id:0x%x\n",
588 clock[i].clk_name, clock[i].valid,
589 clock[i].type, clock[i].clk_id);
592 /* Get topology of all clock */
593 for (i = 0; i < clock_max_idx; i++) {
594 ret = versal_get_clock_type(i, &type);
595 if (ret || type != CLK_TYPE_OUTPUT)
597 debug("clk name:%s\n", clock[i].clk_name);
598 ret = versal_clock_get_topology(i, clock[i].node,
599 &clock[i].num_nodes);
603 ret = versal_clock_get_parents(i, clock[i].parent,
604 &clock[i].num_parents);
610 int versal_clock_setup(void)
614 ret = versal_pm_clock_get_num_clocks(&clock_max_idx);
618 debug("%s, clock_max_idx:0x%x\n", __func__, clock_max_idx);
619 clock = calloc(clock_max_idx, sizeof(*clock));
623 versal_get_clock_info();
628 static int versal_clock_get_freq_by_name(char *name, struct udevice *dev,
634 ret = clk_get_by_name(dev, name, &clk);
636 dev_err(dev, "failed to get %s\n", name);
640 *freq = clk_get_rate(&clk);
641 if (IS_ERR_VALUE(*freq)) {
642 dev_err(dev, "failed to get rate %s\n", name);
649 static int versal_clk_probe(struct udevice *dev)
652 struct versal_clk_priv *priv = dev_get_priv(dev);
654 debug("%s\n", __func__);
656 ret = versal_clock_get_freq_by_name("alt_ref_clk", dev, &alt_ref_clk);
660 ret = versal_clock_get_freq_by_name("pl_alt_ref_clk",
661 dev, &pl_alt_ref_clk);
665 ret = versal_clock_get_freq_by_name("ref_clk", dev, &ref_clk);
669 versal_clock_setup();
676 static ulong versal_clk_get_rate(struct clk *clk)
678 struct versal_clk_priv *priv = dev_get_priv(clk->dev);
683 debug("%s\n", __func__);
685 clk_id = priv->clk[id].clk_id;
687 versal_clock_get_rate(clk_id, &clk_rate);
692 static ulong versal_clk_set_rate(struct clk *clk, ulong rate)
694 struct versal_clk_priv *priv = dev_get_priv(clk->dev);
701 debug("%s\n", __func__);
703 clk_id = priv->clk[id].clk_id;
705 ret = versal_clock_get_rate(clk_id, &clk_rate);
707 printf("Clock is not a Gate:0x%x\n", clk_id);
712 if (versal_clock_div(clk_id)) {
713 div = versal_clock_get_div(clk_id);
715 div = DIV_ROUND_CLOSEST(clk_rate, rate);
716 versal_clock_set_div(clk_id, div);
717 debug("%s, div:%d, newrate:%lld\n", __func__,
718 div, DIV_ROUND_CLOSEST(clk_rate, div));
719 return DIV_ROUND_CLOSEST(clk_rate, div);
721 clk_id = versal_clock_get_parentid(clk_id);
722 } while (((clk_id >> NODE_SUBCLASS_SHIFT) &
723 NODE_CLASS_MASK) != NODE_SUBCLASS_CLOCK_REF);
725 printf("Clock didn't has Divisors:0x%x\n", priv->clk[id].clk_id);
730 static struct clk_ops versal_clk_ops = {
731 .set_rate = versal_clk_set_rate,
732 .get_rate = versal_clk_get_rate,
735 static const struct udevice_id versal_clk_ids[] = {
736 { .compatible = "xlnx,versal-clk" },
740 U_BOOT_DRIVER(versal_clk) = {
741 .name = "versal-clk",
743 .of_match = versal_clk_ids,
744 .probe = versal_clk_probe,
745 .ops = &versal_clk_ops,
746 .priv_auto_alloc_size = sizeof(struct versal_clk_priv),