1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
4  */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <linux/delay.h>
9 #include <linux/module.h>
10 #include <linux/printk.h>
11 #include <linux/spi/spi.h>
12 #include <linux/errno.h>
13 #include <linux/gpio/consumer.h>
14 #include <linux/phylink.h>
15 #include <linux/of.h>
16 #include <linux/of_net.h>
17 #include <linux/of_mdio.h>
18 #include <linux/of_device.h>
19 #include <linux/netdev_features.h>
20 #include <linux/netdevice.h>
21 #include <linux/if_bridge.h>
22 #include <linux/if_ether.h>
23 #include <linux/dsa/8021q.h>
24 #include "sja1105.h"
25 #include "sja1105_tas.h"
26
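/* Assert the reset GPIO for pulse_len ms, then wait startup_delay ms for the
 * chip to become ready again after reset.
 */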
27 static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
28                              unsigned int startup_delay)
29 {
30         gpiod_set_value_cansleep(gpio, 1);
31         /* Wait for minimum reset pulse length */
32         msleep(pulse_len);
33         gpiod_set_value_cansleep(gpio, 0);
34         /* Wait until chip is ready after reset */
35         msleep(startup_delay);
36 }
37
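/* Add or remove port 'to' from the broadcast domain, reachable-port mask and
 * flood domain of port 'from' in the L2 Forwarding table.
 */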
38 static void
39 sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
40                            int from, int to, bool allow)
41 {
42         if (allow) {
43                 l2_fwd[from].bc_domain  |= BIT(to);
44                 l2_fwd[from].reach_port |= BIT(to);
45                 l2_fwd[from].fl_domain  |= BIT(to);
46         } else {
47                 l2_fwd[from].bc_domain  &= ~BIT(to);
48                 l2_fwd[from].reach_port &= ~BIT(to);
49                 l2_fwd[from].fl_domain  &= ~BIT(to);
50         }
51 }
52
53 /* Structure used to temporarily transport device tree
54  * settings into sja1105_setup
55  */
56 struct sja1105_dt_port {
57         phy_interface_t phy_mode;
58         sja1105_mii_role_t role;
59 };
60
61 static int sja1105_init_mac_settings(struct sja1105_private *priv)
62 {
63         struct sja1105_mac_config_entry default_mac = {
64                 /* Enable all 8 priority queues on egress.
65                  * Every queue i holds top[i] - base[i] frames.
66                  * Sum of top[i] - base[i] is 511 (max hardware limit).
67                  */
68                 .top  = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
69                 .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
70                 .enabled = {true, true, true, true, true, true, true, true},
71                 /* Keep standard IFG of 12 bytes on egress. */
72                 .ifg = 0,
73                 /* Always put the MAC speed in automatic mode, where it can be
74                  * adjusted at runtime by PHYLINK.
75                  */
76                 .speed = SJA1105_SPEED_AUTO,
77                 /* No static correction for 1-step 1588 events */
78                 .tp_delin = 0,
79                 .tp_delout = 0,
80                 /* Disable aging for critical TTEthernet traffic */
81                 .maxage = 0xFF,
82                 /* Internal VLAN (pvid) to apply to untagged ingress */
83                 .vlanprio = 0,
84                 .vlanid = 1,
85                 .ing_mirr = false,
86                 .egr_mirr = false,
87                 /* Don't drop traffic with an EtherType other than ETH_P_IP */
88                 .drpnona664 = false,
89                 /* Don't drop double-tagged traffic */
90                 .drpdtag = false,
91                 /* Don't drop untagged traffic */
92                 .drpuntag = false,
93                 /* Don't retag 802.1p (VID 0) traffic with the pvid */
94                 .retag = false,
95                 /* Disable learning and I/O on user ports by default -
96                  * STP will enable it.
97                  */
98                 .dyn_learn = false,
99                 .egress = false,
100                 .ingress = false,
101         };
102         struct sja1105_mac_config_entry *mac;
103         struct sja1105_table *table;
104         int i;
105
106         table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];
107
108         /* Discard previous MAC Configuration Table */
109         if (table->entry_count) {
110                 kfree(table->entries);
111                 table->entry_count = 0;
112         }
113
114         table->entries = kcalloc(SJA1105_NUM_PORTS,
115                                  table->ops->unpacked_entry_size, GFP_KERNEL);
116         if (!table->entries)
117                 return -ENOMEM;
118
119         table->entry_count = SJA1105_NUM_PORTS;
120
121         mac = table->entries;
122
123         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
124                 mac[i] = default_mac;
125                 if (i == dsa_upstream_port(priv->ds, i)) {
126                         /* STP doesn't get called for CPU port, so we need to
127                          * set the I/O parameters statically.
128                          */
129                         mac[i].dyn_learn = true;
130                         mac[i].ingress = true;
131                         mac[i].egress = true;
132                 }
133         }
134
135         return 0;
136 }
137
138 static int sja1105_init_mii_settings(struct sja1105_private *priv,
139                                      struct sja1105_dt_port *ports)
140 {
141         struct device *dev = &priv->spidev->dev;
142         struct sja1105_xmii_params_entry *mii;
143         struct sja1105_table *table;
144         int i;
145
146         table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
147
148         /* Discard previous xMII Mode Parameters Table */
149         if (table->entry_count) {
150                 kfree(table->entries);
151                 table->entry_count = 0;
152         }
153
154         table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
155                                  table->ops->unpacked_entry_size, GFP_KERNEL);
156         if (!table->entries)
157                 return -ENOMEM;
158
159         /* Override table based on PHYLINK DT bindings */
160         table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
161
162         mii = table->entries;
163
164         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
165                 switch (ports[i].phy_mode) {
166                 case PHY_INTERFACE_MODE_MII:
167                         mii->xmii_mode[i] = XMII_MODE_MII;
168                         break;
169                 case PHY_INTERFACE_MODE_RMII:
170                         mii->xmii_mode[i] = XMII_MODE_RMII;
171                         break;
172                 case PHY_INTERFACE_MODE_RGMII:
173                 case PHY_INTERFACE_MODE_RGMII_ID:
174                 case PHY_INTERFACE_MODE_RGMII_RXID:
175                 case PHY_INTERFACE_MODE_RGMII_TXID:
176                         mii->xmii_mode[i] = XMII_MODE_RGMII;
177                         break;
178                 default:
179                         dev_err(dev, "Unsupported PHY mode %s!\n",
180                                 phy_modes(ports[i].phy_mode));
181                 }
182
183                 mii->phy_mac[i] = ports[i].role;
184         }
185         return 0;
186 }
187
188 static int sja1105_init_static_fdb(struct sja1105_private *priv)
189 {
190         struct sja1105_table *table;
191
192         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
193
194         /* We only populate the FDB table through dynamic
195          * L2 Address Lookup entries
196          */
197         if (table->entry_count) {
198                 kfree(table->entries);
199                 table->entry_count = 0;
200         }
201         return 0;
202 }
203
204 static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
205 {
206         struct sja1105_table *table;
207         u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS;
208         struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
209                 /* Learned FDB entries are forgotten after 300 seconds */
210                 .maxage = SJA1105_AGEING_TIME_MS(300000),
211                 /* All entries within a FDB bin are available for learning */
212                 .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
213                 /* And the P/Q/R/S equivalent setting: */
214                 .start_dynspc = 0,
215                 .maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries,
216                              max_fdb_entries, max_fdb_entries, },
217                 /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
218                 .poly = 0x97,
219                 /* This selects between Independent VLAN Learning (IVL) and
220                  * Shared VLAN Learning (SVL)
221                  */
222                 .shared_learn = true,
223                 /* Don't discard management traffic based on ENFPORT -
224                  * we don't perform SMAC port enforcement anyway, so
225                  * what we are setting here doesn't matter.
226                  */
227                 .no_enf_hostprt = false,
228                 /* Don't learn SMAC for mac_fltres1 and mac_fltres0.
229                  * Maybe correlate with no_linklocal_learn from bridge driver?
230                  */
231                 .no_mgmt_learn = true,
232                 /* P/Q/R/S only */
233                 .use_static = true,
234                 /* Dynamically learned FDB entries can overwrite other (older)
235                  * dynamic FDB entries
236                  */
237                 .owr_dyn = true,
238                 .drpnolearn = true,
239         };
240
241         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
242
243         if (table->entry_count) {
244                 kfree(table->entries);
245                 table->entry_count = 0;
246         }
247
248         table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
249                                  table->ops->unpacked_entry_size, GFP_KERNEL);
250         if (!table->entries)
251                 return -ENOMEM;
252
253         table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;
254
255         /* This table only has a single entry */
256         ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
257                                 default_l2_lookup_params;
258
259         return 0;
260 }
261
262 static int sja1105_init_static_vlan(struct sja1105_private *priv)
263 {
264         struct sja1105_table *table;
265         struct sja1105_vlan_lookup_entry pvid = {
266                 .ving_mirr = 0,
267                 .vegr_mirr = 0,
268                 .vmemb_port = 0,
269                 .vlan_bc = 0,
270                 .tag_port = 0,
271                 .vlanid = 1,
272         };
273         int i;
274
275         table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
276
277         /* The static VLAN table will only contain the initial pvid of 1.
278          * All other VLANs are to be configured through dynamic entries,
279          * and kept in the static configuration table as backing memory.
280          */
281         if (table->entry_count) {
282                 kfree(table->entries);
283                 table->entry_count = 0;
284         }
285
286         table->entries = kcalloc(1, table->ops->unpacked_entry_size,
287                                  GFP_KERNEL);
288         if (!table->entries)
289                 return -ENOMEM;
290
291         table->entry_count = 1;
292
293         /* VLAN 1: all DT-defined ports are members; no restrictions on
294          * forwarding; always transmit priority-tagged frames as untagged.
295          */
296         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
297                 pvid.vmemb_port |= BIT(i);
298                 pvid.vlan_bc |= BIT(i);
299                 pvid.tag_port &= ~BIT(i);
300         }
301
302         ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
303         return 0;
304 }
305
306 static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
307 {
308         struct sja1105_l2_forwarding_entry *l2fwd;
309         struct sja1105_table *table;
310         int i, j;
311
312         table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
313
314         if (table->entry_count) {
315                 kfree(table->entries);
316                 table->entry_count = 0;
317         }
318
319         table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
320                                  table->ops->unpacked_entry_size, GFP_KERNEL);
321         if (!table->entries)
322                 return -ENOMEM;
323
324         table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;
325
326         l2fwd = table->entries;
327
328         /* First 5 entries define the forwarding rules */
329         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
330                 unsigned int upstream = dsa_upstream_port(priv->ds, i);
331
332                 for (j = 0; j < SJA1105_NUM_TC; j++)
333                         l2fwd[i].vlan_pmap[j] = j;
334
335                 if (i == upstream)
336                         continue;
337
338                 sja1105_port_allow_traffic(l2fwd, i, upstream, true);
339                 sja1105_port_allow_traffic(l2fwd, upstream, i, true);
340         }
341         /* Next 8 entries define VLAN PCP mapping from ingress to egress.
342          * Create a one-to-one mapping.
343          */
344         for (i = 0; i < SJA1105_NUM_TC; i++)
345                 for (j = 0; j < SJA1105_NUM_PORTS; j++)
346                         l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;
347
348         return 0;
349 }
350
351 static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
352 {
353         struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
354                 /* Disallow dynamic reconfiguration of vlan_pmap */
355                 .max_dynp = 0,
356                 /* Use a single memory partition for all ingress queues */
357                 .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
358         };
359         struct sja1105_table *table;
360
361         table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
362
363         if (table->entry_count) {
364                 kfree(table->entries);
365                 table->entry_count = 0;
366         }
367
368         table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
369                                  table->ops->unpacked_entry_size, GFP_KERNEL);
370         if (!table->entries)
371                 return -ENOMEM;
372
373         table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
374
375         /* This table only has a single entry */
376         ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
377                                 default_l2fwd_params;
378
379         return 0;
380 }
381
382 static int sja1105_init_general_params(struct sja1105_private *priv)
383 {
384         struct sja1105_general_params_entry default_general_params = {
385                 /* Disallow dynamic changing of the mirror port */
386                 .mirr_ptacu = 0,
387                 .switchid = priv->ds->index,
388                 /* Priority queue for link-local management frames
389                  * (both ingress to and egress from CPU - PTP, STP etc)
390                  */
391                 .hostprio = 7,
392                 .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
393                 .mac_flt1    = SJA1105_LINKLOCAL_FILTER_A_MASK,
394                 .incl_srcpt1 = false,
395                 .send_meta1  = false,
396                 .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
397                 .mac_flt0    = SJA1105_LINKLOCAL_FILTER_B_MASK,
398                 .incl_srcpt0 = false,
399                 .send_meta0  = false,
400                 /* The destination for traffic matching mac_fltres1 and
401                  * mac_fltres0 on all ports except host_port. Such traffic
402                  * received on host_port itself would be dropped, except
403                  * by installing a temporary 'management route'
404                  */
405                 .host_port = dsa_upstream_port(priv->ds, 0),
406                 /* Same as host port */
407                 .mirr_port = dsa_upstream_port(priv->ds, 0),
408                 /* Link-local traffic received on casc_port will be forwarded
409                  * to host_port without embedding the source port and device ID
410                  * info in the destination MAC address (presumably because it
411                  * is a cascaded port and a downstream SJA switch already did
412                  * that). Default to an invalid port (to disable the feature)
413                  * and overwrite this if we find any DSA (cascaded) ports.
414                  */
415                 .casc_port = SJA1105_NUM_PORTS,
416                 /* No TTEthernet */
417                 .vllupformat = 0,
418                 .vlmarker = 0,
419                 .vlmask = 0,
420                 /* Only update correctionField for 1-step PTP (L2 transport) */
421                 .ignore2stf = 0,
422                 /* Forcefully disable VLAN filtering by telling
423                  * the switch that VLAN has a different EtherType.
424                  */
425                 .tpid = ETH_P_SJA1105,
426                 .tpid2 = ETH_P_SJA1105,
427         };
428         struct sja1105_table *table;
429         int i, k = 0;
430
431         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
432                 if (dsa_is_dsa_port(priv->ds, i))
433                         default_general_params.casc_port = i;
434                 else if (dsa_is_user_port(priv->ds, i))
435                         priv->ports[i].mgmt_slot = k++;
436         }
437
438         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
439
440         if (table->entry_count) {
441                 kfree(table->entries);
442                 table->entry_count = 0;
443         }
444
445         table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
446                                  table->ops->unpacked_entry_size, GFP_KERNEL);
447         if (!table->entries)
448                 return -ENOMEM;
449
450         table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;
451
452         /* This table only has a single entry */
453         ((struct sja1105_general_params_entry *)table->entries)[0] =
454                                 default_general_params;
455
456         return 0;
457 }
458
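/* SJA1105_RATE_MBPS() converts a rate in Mbps to the value programmed into
 * the policer RATE field, which counts in steps of 1/64 Mbps (hence the
 * x64 scaling, 64000 / 1000, below).
 */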
459 #define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
460
461 static inline void
462 sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
463                       int index)
464 {
465         policing[index].sharindx = index;
466         policing[index].smax = 65535; /* Burst size in bytes */
467         policing[index].rate = SJA1105_RATE_MBPS(1000);
468         policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
469         policing[index].partition = 0;
470 }
471
472 static int sja1105_init_l2_policing(struct sja1105_private *priv)
473 {
474         struct sja1105_l2_policing_entry *policing;
475         struct sja1105_table *table;
476         int i, j, k;
477
478         table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
479
480         /* Discard previous L2 Policing Table */
481         if (table->entry_count) {
482                 kfree(table->entries);
483                 table->entry_count = 0;
484         }
485
486         table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
487                                  table->ops->unpacked_entry_size, GFP_KERNEL);
488         if (!table->entries)
489                 return -ENOMEM;
490
491         table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;
492
493         policing = table->entries;
494
495         /* k sweeps through all unicast policers (0-39).
496          * bcast sweeps through policers 40-44.
497          */
498         for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) {
499                 int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i;
500
501                 for (j = 0; j < SJA1105_NUM_TC; j++, k++)
502                         sja1105_setup_policer(policing, k);
503
504                 /* Set up this port's policer for broadcast traffic */
505                 sja1105_setup_policer(policing, bcast);
506         }
507         return 0;
508 }
509
510 static int sja1105_init_avb_params(struct sja1105_private *priv,
511                                    bool on)
512 {
513         struct sja1105_avb_params_entry *avb;
514         struct sja1105_table *table;
515
516         table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];
517
518         /* Discard previous AVB Parameters Table */
519         if (table->entry_count) {
520                 kfree(table->entries);
521                 table->entry_count = 0;
522         }
523
524         /* Configure the reception of meta frames only if requested */
525         if (!on)
526                 return 0;
527
528         table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
529                                  table->ops->unpacked_entry_size, GFP_KERNEL);
530         if (!table->entries)
531                 return -ENOMEM;
532
533         table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
534
535         avb = table->entries;
536
537         avb->destmeta = SJA1105_META_DMAC;
538         avb->srcmeta  = SJA1105_META_SMAC;
539
540         return 0;
541 }
542
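/* Build a fresh static configuration (MAC, xMII, FDB, VLAN, forwarding,
 * policing, general and AVB parameter tables) and upload it to the switch
 * over SPI.
 */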
543 static int sja1105_static_config_load(struct sja1105_private *priv,
544                                       struct sja1105_dt_port *ports)
545 {
546         int rc;
547
548         sja1105_static_config_free(&priv->static_config);
549         rc = sja1105_static_config_init(&priv->static_config,
550                                         priv->info->static_ops,
551                                         priv->info->device_id);
552         if (rc)
553                 return rc;
554
555         /* Build static configuration */
556         rc = sja1105_init_mac_settings(priv);
557         if (rc < 0)
558                 return rc;
559         rc = sja1105_init_mii_settings(priv, ports);
560         if (rc < 0)
561                 return rc;
562         rc = sja1105_init_static_fdb(priv);
563         if (rc < 0)
564                 return rc;
565         rc = sja1105_init_static_vlan(priv);
566         if (rc < 0)
567                 return rc;
568         rc = sja1105_init_l2_lookup_params(priv);
569         if (rc < 0)
570                 return rc;
571         rc = sja1105_init_l2_forwarding(priv);
572         if (rc < 0)
573                 return rc;
574         rc = sja1105_init_l2_forwarding_params(priv);
575         if (rc < 0)
576                 return rc;
577         rc = sja1105_init_l2_policing(priv);
578         if (rc < 0)
579                 return rc;
580         rc = sja1105_init_general_params(priv);
581         if (rc < 0)
582                 return rc;
583         rc = sja1105_init_avb_params(priv, false);
584         if (rc < 0)
585                 return rc;
586
587         /* Send initial configuration to hardware via SPI */
588         return sja1105_static_config_upload(priv);
589 }
590
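/* Record which PHY-role ports need internal RGMII RX/TX delays based on the
 * phy-mode bindings, and error out if delays are requested on a switch that
 * has no setup_rgmii_delay hook to apply them.
 */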
591 static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
592                                       const struct sja1105_dt_port *ports)
593 {
594         int i;
595
596         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
597                 if (ports[i].role == XMII_MAC)
598                         continue;
599
600                 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
601                     ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
602                         priv->rgmii_rx_delay[i] = true;
603
604                 if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
605                     ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
606                         priv->rgmii_tx_delay[i] = true;
607
608                 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
609                      !priv->info->setup_rgmii_delay)
610                         return -EINVAL;
611         }
612         return 0;
613 }
614
615 static int sja1105_parse_ports_node(struct sja1105_private *priv,
616                                     struct sja1105_dt_port *ports,
617                                     struct device_node *ports_node)
618 {
619         struct device *dev = &priv->spidev->dev;
620         struct device_node *child;
621
622         for_each_available_child_of_node(ports_node, child) {
623                 struct device_node *phy_node;
624                 int phy_mode;
625                 u32 index;
626
627                 /* Get switch port number from DT */
628                 if (of_property_read_u32(child, "reg", &index) < 0) {
629                         dev_err(dev, "Port number not defined in device tree "
630                                 "(property \"reg\")\n");
631                         of_node_put(child);
632                         return -ENODEV;
633                 }
634
635                 /* Get PHY mode from DT */
636                 phy_mode = of_get_phy_mode(child);
637                 if (phy_mode < 0) {
638                         dev_err(dev, "Failed to read phy-mode or "
639                                 "phy-interface-type property for port %d\n",
640                                 index);
641                         of_node_put(child);
642                         return -ENODEV;
643                 }
644                 ports[index].phy_mode = phy_mode;
645
646                 phy_node = of_parse_phandle(child, "phy-handle", 0);
647                 if (!phy_node) {
648                         if (!of_phy_is_fixed_link(child)) {
649                                 dev_err(dev, "phy-handle or fixed-link "
650                                         "properties missing!\n");
651                                 of_node_put(child);
652                                 return -ENODEV;
653                         }
654                         /* phy-handle is missing, but fixed-link isn't.
655                          * So it's a fixed link. Default to PHY role.
656                          */
657                         ports[index].role = XMII_PHY;
658                 } else {
659                         /* phy-handle present => put port in MAC role */
660                         ports[index].role = XMII_MAC;
661                         of_node_put(phy_node);
662                 }
663
664                 /* The MAC/PHY role can be overridden with explicit bindings */
665                 if (of_property_read_bool(child, "sja1105,role-mac"))
666                         ports[index].role = XMII_MAC;
667                 else if (of_property_read_bool(child, "sja1105,role-phy"))
668                         ports[index].role = XMII_PHY;
669         }
670
671         return 0;
672 }
673
674 static int sja1105_parse_dt(struct sja1105_private *priv,
675                             struct sja1105_dt_port *ports)
676 {
677         struct device *dev = &priv->spidev->dev;
678         struct device_node *switch_node = dev->of_node;
679         struct device_node *ports_node;
680         int rc;
681
682         ports_node = of_get_child_by_name(switch_node, "ports");
683         if (!ports_node) {
684                 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
685                 return -ENODEV;
686         }
687
688         rc = sja1105_parse_ports_node(priv, ports, ports_node);
689         of_node_put(ports_node);
690
691         return rc;
692 }
693
694 /* Convert link speed from SJA1105 to ethtool encoding */
695 static int sja1105_speed[] = {
696         [SJA1105_SPEED_AUTO]            = SPEED_UNKNOWN,
697         [SJA1105_SPEED_10MBPS]          = SPEED_10,
698         [SJA1105_SPEED_100MBPS]         = SPEED_100,
699         [SJA1105_SPEED_1000MBPS]        = SPEED_1000,
700 };
701
702 /* Set link speed in the MAC configuration for a specific port. */
703 static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
704                                       int speed_mbps)
705 {
706         struct sja1105_xmii_params_entry *mii;
707         struct sja1105_mac_config_entry *mac;
708         struct device *dev = priv->ds->dev;
709         sja1105_phy_interface_t phy_mode;
710         sja1105_speed_t speed;
711         int rc;
712
713         /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
714          * tables. On E/T, MAC reconfig tables are not readable, only writable.
715          * We have to *know* what the MAC looks like.  For the sake of keeping
716          * the code common, we'll use the static configuration tables as a
717          * reasonable approximation for both E/T and P/Q/R/S.
718          */
719         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
720         mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
721
722         switch (speed_mbps) {
723         case SPEED_UNKNOWN:
724                 /* PHYLINK called sja1105_mac_config() to inform us about
725                  * the state->interface, but AN has not completed and the
726                  * speed is not yet valid. UM10944.pdf says that setting
727                  * SJA1105_SPEED_AUTO at runtime disables the port, so that is
728                  * ok for power consumption in case AN will never complete -
729                  * otherwise PHYLINK should come back with a new update.
730                  */
731                 speed = SJA1105_SPEED_AUTO;
732                 break;
733         case SPEED_10:
734                 speed = SJA1105_SPEED_10MBPS;
735                 break;
736         case SPEED_100:
737                 speed = SJA1105_SPEED_100MBPS;
738                 break;
739         case SPEED_1000:
740                 speed = SJA1105_SPEED_1000MBPS;
741                 break;
742         default:
743                 dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
744                 return -EINVAL;
745         }
746
747         /* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
748          * table, since this will be used for the clocking setup, and we no
749          * longer need to store it in the static config (already told hardware
750          * we want auto during upload phase).
751          */
752         mac[port].speed = speed;
753
754         /* Write to the dynamic reconfiguration tables */
755         rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
756                                           &mac[port], true);
757         if (rc < 0) {
758                 dev_err(dev, "Failed to write MAC config: %d\n", rc);
759                 return rc;
760         }
761
762         /* Reconfigure the PLLs for the RGMII interfaces (125 MHz is required
763          * at gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
764          * RMII no change of the clock setup is required. Actually, changing
765          * the clock setup does interrupt the clock signal for a certain time
766          * which causes trouble for all PHYs relying on this signal.
767          */
768         phy_mode = mii->xmii_mode[port];
769         if (phy_mode != XMII_MODE_RGMII)
770                 return 0;
771
772         return sja1105_clocking_setup_port(priv, port);
773 }
774
775 /* The SJA1105 MAC programming model is through the static config (the xMII
776  * Mode table cannot be dynamically reconfigured), and we have to program
777  * that early (earlier than PHYLINK calls us, anyway).
778  * So just error out in case the connected PHY attempts to change the initial
779  * system interface MII protocol from what is defined in the DT, at least for
780  * now.
781  */
782 static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
783                                       phy_interface_t interface)
784 {
785         struct sja1105_xmii_params_entry *mii;
786         sja1105_phy_interface_t phy_mode;
787
788         mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
789         phy_mode = mii->xmii_mode[port];
790
791         switch (interface) {
792         case PHY_INTERFACE_MODE_MII:
793                 return (phy_mode != XMII_MODE_MII);
794         case PHY_INTERFACE_MODE_RMII:
795                 return (phy_mode != XMII_MODE_RMII);
796         case PHY_INTERFACE_MODE_RGMII:
797         case PHY_INTERFACE_MODE_RGMII_ID:
798         case PHY_INTERFACE_MODE_RGMII_RXID:
799         case PHY_INTERFACE_MODE_RGMII_TXID:
800                 return (phy_mode != XMII_MODE_RGMII);
801         default:
802                 return true;
803         }
804 }
805
806 static void sja1105_mac_config(struct dsa_switch *ds, int port,
807                                unsigned int link_an_mode,
808                                const struct phylink_link_state *state)
809 {
810         struct sja1105_private *priv = ds->priv;
811
812         if (sja1105_phy_mode_mismatch(priv, port, state->interface))
813                 return;
814
815         if (link_an_mode == MLO_AN_INBAND) {
816                 dev_err(ds->dev, "In-band AN not supported!\n");
817                 return;
818         }
819
820         sja1105_adjust_port_config(priv, port, state->speed);
821 }
822
823 static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
824                                   unsigned int mode,
825                                   phy_interface_t interface)
826 {
827         sja1105_inhibit_tx(ds->priv, BIT(port), true);
828 }
829
830 static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
831                                 unsigned int mode,
832                                 phy_interface_t interface,
833                                 struct phy_device *phydev)
834 {
835         sja1105_inhibit_tx(ds->priv, BIT(port), false);
836 }
837
838 static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
839                                      unsigned long *supported,
840                                      struct phylink_link_state *state)
841 {
842         /* Construct a new mask which exhaustively contains all link features
843          * supported by the MAC, and then apply that (logical AND) to what will
844          * be sent to the PHY for "marketing".
845          */
846         __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
847         struct sja1105_private *priv = ds->priv;
848         struct sja1105_xmii_params_entry *mii;
849
850         mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
851
852         /* include/linux/phylink.h says:
853          *     When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
854          *     expects the MAC driver to return all supported link modes.
855          */
856         if (state->interface != PHY_INTERFACE_MODE_NA &&
857             sja1105_phy_mode_mismatch(priv, port, state->interface)) {
858                 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
859                 return;
860         }
861
862         /* The MAC does not support pause frames, and also doesn't
863          * support half-duplex traffic modes.
864          */
865         phylink_set(mask, Autoneg);
866         phylink_set(mask, MII);
867         phylink_set(mask, 10baseT_Full);
868         phylink_set(mask, 100baseT_Full);
869         if (mii->xmii_mode[port] == XMII_MODE_RGMII)
870                 phylink_set(mask, 1000baseT_Full);
871
872         bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
873         bitmap_and(state->advertising, state->advertising, mask,
874                    __ETHTOOL_LINK_MODE_MASK_NBITS);
875 }
876
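/* Return the index of the static L2 Lookup entry matching the requested MAC
 * address and VLAN ID whose destination mask includes this port, or -1 if no
 * such entry exists.
 */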
877 static int
878 sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
879                               const struct sja1105_l2_lookup_entry *requested)
880 {
881         struct sja1105_l2_lookup_entry *l2_lookup;
882         struct sja1105_table *table;
883         int i;
884
885         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
886         l2_lookup = table->entries;
887
888         for (i = 0; i < table->entry_count; i++)
889                 if (l2_lookup[i].macaddr == requested->macaddr &&
890                     l2_lookup[i].vlanid == requested->vlanid &&
891                     l2_lookup[i].destports & BIT(port))
892                         return i;
893
894         return -1;
895 }
896
897 /* We want FDB entries added statically through the bridge command to persist
898  * across switch resets, which are a common thing during normal SJA1105
899  * operation. So we have to back them up in the static configuration tables
900  * and hence apply them on next static config upload... yay!
901  */
902 static int
903 sja1105_static_fdb_change(struct sja1105_private *priv, int port,
904                           const struct sja1105_l2_lookup_entry *requested,
905                           bool keep)
906 {
907         struct sja1105_l2_lookup_entry *l2_lookup;
908         struct sja1105_table *table;
909         int rc, match;
910
911         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
912
913         match = sja1105_find_static_fdb_entry(priv, port, requested);
914         if (match < 0) {
915                 /* Can't delete a missing entry. */
916                 if (!keep)
917                         return 0;
918
919                 /* No match => new entry */
920                 rc = sja1105_table_resize(table, table->entry_count + 1);
921                 if (rc)
922                         return rc;
923
924                 match = table->entry_count - 1;
925         }
926
927         /* Assign pointer after the resize (it may be new memory) */
928         l2_lookup = table->entries;
929
930         /* We have a match.
931          * If the job was to add this FDB entry, it's already done (mostly
932          * anyway, since the port forwarding mask may have changed, in which
933          * case we update it).
934          * Otherwise we have to delete it.
935          */
936         if (keep) {
937                 l2_lookup[match] = *requested;
938                 return 0;
939         }
940
941         /* To remove, the strategy is to overwrite the element with
942          * the last one, and then reduce the array size by 1
943          */
944         l2_lookup[match] = l2_lookup[table->entry_count - 1];
945         return sja1105_table_resize(table, table->entry_count - 1);
946 }
947
948 /* First-generation switches have a 4-way set associative TCAM that
949  * holds the FDB entries. An FDB index spans from 0 to 1023 and consists of
950  * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
951  * For the placement of a newly learnt FDB entry, the switch selects the bin
952  * based on a hash function, and the way within that bin incrementally.
953  */
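/* For example, bin 5, way 2 maps to FDB index 5 * SJA1105ET_FDB_BIN_SIZE + 2 = 22. */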
954 static inline int sja1105et_fdb_index(int bin, int way)
955 {
956         return bin * SJA1105ET_FDB_BIN_SIZE + way;
957 }
958
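/* Scan the 4 ways of a bin for an entry matching addr and vid. Returns the
 * matching way (copying the entry into *match if provided), or -1 if none.
 * If last_unused is provided, it is set to the last unoccupied way seen.
 */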
959 static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
960                                          const u8 *addr, u16 vid,
961                                          struct sja1105_l2_lookup_entry *match,
962                                          int *last_unused)
963 {
964         int way;
965
966         for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
967                 struct sja1105_l2_lookup_entry l2_lookup = {0};
968                 int index = sja1105et_fdb_index(bin, way);
969
970                 /* Skip unused entries, optionally recording the last
971                  * unused way in *last_unused
972                  */
973                 if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
974                                                 index, &l2_lookup)) {
975                         if (last_unused)
976                                 *last_unused = way;
977                         continue;
978                 }
979
980                 if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
981                     l2_lookup.vlanid == vid) {
982                         if (match)
983                                 *match = l2_lookup;
984                         return way;
985                 }
986         }
987         /* Return an invalid entry index if not found */
988         return -1;
989 }
990
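/* E/T FDB add: hash the address into a bin, then either extend an existing
 * entry's port mask, take an unused way, or evict a random victim if the bin
 * is full. The entry is also mirrored into the static config so that it
 * survives switch resets.
 */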
991 int sja1105et_fdb_add(struct dsa_switch *ds, int port,
992                       const unsigned char *addr, u16 vid)
993 {
994         struct sja1105_l2_lookup_entry l2_lookup = {0};
995         struct sja1105_private *priv = ds->priv;
996         struct device *dev = ds->dev;
997         int last_unused = -1;
998         int bin, way, rc;
999
1000         bin = sja1105et_fdb_hash(priv, addr, vid);
1001
1002         way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1003                                             &l2_lookup, &last_unused);
1004         if (way >= 0) {
1005                 /* We have an FDB entry. Is our port in the destination
1006                  * mask? If yes, we need to do nothing. If not, we need
1007                  * to rewrite the entry by adding this port to it.
1008                  */
1009                 if (l2_lookup.destports & BIT(port))
1010                         return 0;
1011                 l2_lookup.destports |= BIT(port);
1012         } else {
1013                 int index;
1014 
1015                 /* We don't have an FDB entry. We construct a new one and
1016                  * try to find a place for it within the FDB table.
1017                  */
1018                 l2_lookup.macaddr = ether_addr_to_u64(addr);
1019                 l2_lookup.destports = BIT(port);
1020                 l2_lookup.vlanid = vid;
1021 
1022                 if (last_unused >= 0) {
1023                         way = last_unused;
1024                 } else {
1025                         /* Bin is full, need to evict somebody.
1026                          * Choose victim at random. If you get these messages
1027                          * often, you may need to consider changing the
1028                          * distribution function:
1029                          * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
1030                          */
1031                         way = get_random_u32() % SJA1105ET_FDB_BIN_SIZE;
1032                         index = sja1105et_fdb_index(bin, way);
1033                         dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
1034                                  bin, addr, way);
1035                         /* Evict entry */
1036                         sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1037                                                      index, NULL, false);
1038                 }
1039         }
1040         l2_lookup.index = sja1105et_fdb_index(bin, way);
1041
1042         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1043                                           l2_lookup.index, &l2_lookup,
1044                                           true);
1045         if (rc < 0)
1046                 return rc;
1047
1048         return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
1049 }
1050
1051 int sja1105et_fdb_del(struct dsa_switch *ds, int port,
1052                       const unsigned char *addr, u16 vid)
1053 {
1054         struct sja1105_l2_lookup_entry l2_lookup = {0};
1055         struct sja1105_private *priv = ds->priv;
1056         int index, bin, way, rc;
1057         bool keep;
1058
1059         bin = sja1105et_fdb_hash(priv, addr, vid);
1060         way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1061                                             &l2_lookup, NULL);
1062         if (way < 0)
1063                 return 0;
1064         index = sja1105et_fdb_index(bin, way);
1065
1066         /* We have an FDB entry. Is our port in the destination mask? If yes,
1067          * we need to remove it. If the resulting port mask becomes empty, we
1068          * need to completely evict the FDB entry.
1069          * Otherwise we just write it back.
1070          */
1071         l2_lookup.destports &= ~BIT(port);
1072
1073         if (l2_lookup.destports)
1074                 keep = true;
1075         else
1076                 keep = false;
1077
1078         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1079                                           index, &l2_lookup, keep);
1080         if (rc < 0)
1081                 return rc;
1082
1083         return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
1084 }
1085
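/* P/Q/R/S FDB add: let the switch search by MAC/VID first; if nothing
 * matches, probe indices 0..1023 for a free slot, install a locked (static)
 * entry there, and mirror it into the static config.
 */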
1086 int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
1087                         const unsigned char *addr, u16 vid)
1088 {
1089         struct sja1105_l2_lookup_entry l2_lookup = {0};
1090         struct sja1105_private *priv = ds->priv;
1091         int rc, i;
1092
1093         /* Search for an existing entry in the FDB table */
1094         l2_lookup.macaddr = ether_addr_to_u64(addr);
1095         l2_lookup.vlanid = vid;
1096         l2_lookup.iotag = SJA1105_S_TAG;
1097         l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
1098         if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
1099                 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1100                 l2_lookup.mask_iotag = BIT(0);
1101         } else {
1102                 l2_lookup.mask_vlanid = 0;
1103                 l2_lookup.mask_iotag = 0;
1104         }
1105         l2_lookup.destports = BIT(port);
1106
1107         rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1108                                          SJA1105_SEARCH, &l2_lookup);
1109         if (rc == 0) {
1110                 /* Found and this port is already in the entry's
1111                  * port mask => job done
1112                  */
1113                 if (l2_lookup.destports & BIT(port))
1114                         return 0;
1115                 /* l2_lookup.index is populated by the switch in case it
1116                  * found something.
1117                  */
1118                 l2_lookup.destports |= BIT(port);
1119                 goto skip_finding_an_index;
1120         }
1121
1122         /* Not found, so try to find an unused spot in the FDB.
1123          * This is slightly inefficient because the strategy is knock-knock at
1124          * every possible position from 0 to 1023.
1125          */
1126         for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1127                 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1128                                                  i, NULL);
1129                 if (rc < 0)
1130                         break;
1131         }
1132         if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
1133                 dev_err(ds->dev, "FDB is full, cannot add entry.\n");
1134                 return -EINVAL;
1135         }
1136         l2_lookup.lockeds = true;
1137         l2_lookup.index = i;
1138
1139 skip_finding_an_index:
1140         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1141                                           l2_lookup.index, &l2_lookup,
1142                                           true);
1143         if (rc < 0)
1144                 return rc;
1145
1146         return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
1147 }
1148
1149 int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
1150                         const unsigned char *addr, u16 vid)
1151 {
1152         struct sja1105_l2_lookup_entry l2_lookup = {0};
1153         struct sja1105_private *priv = ds->priv;
1154         bool keep;
1155         int rc;
1156
1157         l2_lookup.macaddr = ether_addr_to_u64(addr);
1158         l2_lookup.vlanid = vid;
1159         l2_lookup.iotag = SJA1105_S_TAG;
1160         l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
1161         if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
1162                 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1163                 l2_lookup.mask_iotag = BIT(0);
1164         } else {
1165                 l2_lookup.mask_vlanid = 0;
1166                 l2_lookup.mask_iotag = 0;
1167         }
1168         l2_lookup.destports = BIT(port);
1169
1170         rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1171                                          SJA1105_SEARCH, &l2_lookup);
1172         if (rc < 0)
1173                 return 0;
1174
1175         l2_lookup.destports &= ~BIT(port);
1176
1177         /* Decide whether we remove just this port from the FDB entry,
1178          * or if we remove it completely.
1179          */
1180         if (l2_lookup.destports)
1181                 keep = true;
1182         else
1183                 keep = false;
1184
1185         rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1186                                           l2_lookup.index, &l2_lookup, keep);
1187         if (rc < 0)
1188                 return rc;
1189
1190         return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
1191 }
1192
1193 static int sja1105_fdb_add(struct dsa_switch *ds, int port,
1194                            const unsigned char *addr, u16 vid)
1195 {
1196         struct sja1105_private *priv = ds->priv;
1197
1198         /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
1199          * so the switch still does some VLAN processing internally.
1200          * But Shared VLAN Learning (SVL) is also active, and it will take
1201          * care of autonomous forwarding between the unique pvids of each
1202          * port.  Here we just make sure that users can't add duplicate FDB
1203          * entries when in this mode - the actual VID doesn't matter except
1204          * for what gets printed in 'bridge fdb show'.  In the case of zero,
1205          * no VID gets printed at all.
1206          */
1207         if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
1208                 vid = 0;
1209
1210         return priv->info->fdb_add_cmd(ds, port, addr, vid);
1211 }
1212
1213 static int sja1105_fdb_del(struct dsa_switch *ds, int port,
1214                            const unsigned char *addr, u16 vid)
1215 {
1216         struct sja1105_private *priv = ds->priv;
1217
1218         if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
1219                 vid = 0;
1220
1221         return priv->info->fdb_del_cmd(ds, port, addr, vid);
1222 }
1223
1224 static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
1225                             dsa_fdb_dump_cb_t *cb, void *data)
1226 {
1227         struct sja1105_private *priv = ds->priv;
1228         struct device *dev = ds->dev;
1229         int i;
1230
1231         for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1232                 struct sja1105_l2_lookup_entry l2_lookup = {0};
1233                 u8 macaddr[ETH_ALEN];
1234                 int rc;
1235
1236                 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1237                                                  i, &l2_lookup);
1238                 /* No fdb entry at i, not an issue */
1239                 if (rc == -ENOENT)
1240                         continue;
1241                 if (rc) {
1242                         dev_err(dev, "Failed to dump FDB: %d\n", rc);
1243                         return rc;
1244                 }
1245
1246                 /* FDB dump callback is per port. This means we have to
1247                  * disregard a valid entry if it's not for this port, even if
1248                  * only to revisit it later. This is inefficient because the
1249                  * 1024-sized FDB table needs to be traversed 4 times through
1250                  * SPI during a 'bridge fdb show' command.
1251                  */
1252                 if (!(l2_lookup.destports & BIT(port)))
1253                         continue;
1254                 u64_to_ether_addr(l2_lookup.macaddr, macaddr);
1255
1256                 /* We need to hide the dsa_8021q VLANs from the user. */
1257                 if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
1258                         l2_lookup.vlanid = 0;
1259                 cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
1260         }
1261         return 0;
1262 }
1263
1264 /* This callback needs to be present */
1265 static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
1266                                const struct switchdev_obj_port_mdb *mdb)
1267 {
1268         return 0;
1269 }
1270
1271 static void sja1105_mdb_add(struct dsa_switch *ds, int port,
1272                             const struct switchdev_obj_port_mdb *mdb)
1273 {
1274         sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
1275 }
1276
1277 static int sja1105_mdb_del(struct dsa_switch *ds, int port,
1278                            const struct switchdev_obj_port_mdb *mdb)
1279 {
1280         return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
1281 }
1282
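/* Called on bridge join/leave: update the L2 Forwarding table so this port
 * and every other user port under the same bridge can (or can no longer)
 * reach each other, committing each change through the dynamic interface.
 */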
1283 static int sja1105_bridge_member(struct dsa_switch *ds, int port,
1284                                  struct net_device *br, bool member)
1285 {
1286         struct sja1105_l2_forwarding_entry *l2_fwd;
1287         struct sja1105_private *priv = ds->priv;
1288         int i, rc;
1289
1290         l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
1291
1292         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1293                 /* Add this port to the forwarding matrix of the
1294                  * other ports in the same bridge, and vice versa.
1295                  */
1296                 if (!dsa_is_user_port(ds, i))
1297                         continue;
1298                 /* For the ports already under the bridge, only one thing needs
1299                  * to be done, and that is to add this port to their
1300                  * reachability domain. So we can perform the SPI write for
1301                  * them immediately. However, for this port itself (the one
1302                  * that is new to the bridge), we need to add all other ports
1303                  * to its reachability domain. So we do that incrementally in
1304                  * this loop, and perform the SPI write only at the end, once
1305                  * the domain contains all other bridge ports.
1306                  */
1307                 if (i == port)
1308                         continue;
1309                 if (dsa_to_port(ds, i)->bridge_dev != br)
1310                         continue;
1311                 sja1105_port_allow_traffic(l2_fwd, i, port, member);
1312                 sja1105_port_allow_traffic(l2_fwd, port, i, member);
1313
1314                 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1315                                                   i, &l2_fwd[i], true);
1316                 if (rc < 0)
1317                         return rc;
1318         }
1319
1320         return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
1321                                             port, &l2_fwd[port], true);
1322 }
1323
1324 static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
1325                                          u8 state)
1326 {
1327         struct sja1105_private *priv = ds->priv;
1328         struct sja1105_mac_config_entry *mac;
1329
1330         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1331
1332         switch (state) {
1333         case BR_STATE_DISABLED:
1334         case BR_STATE_BLOCKING:
1335                 /* From UM10944 description of DRPDTAG (why put this there?):
1336                  * "Management traffic flows to the port regardless of the state
1337                  * of the INGRESS flag". So BPDUs are still allowed to pass.
1338                  * At the moment there is no difference between DISABLED and BLOCKING.
1339                  */
1340                 mac[port].ingress   = false;
1341                 mac[port].egress    = false;
1342                 mac[port].dyn_learn = false;
1343                 break;
1344         case BR_STATE_LISTENING:
1345                 mac[port].ingress   = true;
1346                 mac[port].egress    = false;
1347                 mac[port].dyn_learn = false;
1348                 break;
1349         case BR_STATE_LEARNING:
1350                 mac[port].ingress   = true;
1351                 mac[port].egress    = false;
1352                 mac[port].dyn_learn = true;
1353                 break;
1354         case BR_STATE_FORWARDING:
1355                 mac[port].ingress   = true;
1356                 mac[port].egress    = true;
1357                 mac[port].dyn_learn = true;
1358                 break;
1359         default:
1360                 dev_err(ds->dev, "invalid STP state: %d\n", state);
1361                 return;
1362         }
1363
1364         sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1365                                      &mac[port], true);
1366 }
1367
1368 static int sja1105_bridge_join(struct dsa_switch *ds, int port,
1369                                struct net_device *br)
1370 {
1371         return sja1105_bridge_member(ds, port, br, true);
1372 }
1373
1374 static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
1375                                  struct net_device *br)
1376 {
1377         sja1105_bridge_member(ds, port, br, false);
1378 }
1379
1380 /* For situations where we need to change a setting at runtime that is only
1381  * available through the static configuration, resetting the switch in order
1382  * to upload the new static config is unavoidable. Back up the settings we
1383  * modify at runtime (currently only MAC) and restore them after uploading,
1384  * such that this operation is relatively seamless.
1385  */
1386 int sja1105_static_config_reload(struct sja1105_private *priv)
1387 {
1388         struct sja1105_mac_config_entry *mac;
1389         int speed_mbps[SJA1105_NUM_PORTS];
1390         int rc, i;
1391
1392         mutex_lock(&priv->mgmt_lock);
1393
1394         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1395
1396         /* Back up the dynamic link speed changed by sja1105_adjust_port_config
1397          * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
1398          * switch wants to see in the static config in order to allow us to
1399          * change it through the dynamic interface later.
1400          */
1401         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1402                 speed_mbps[i] = sja1105_speed[mac[i].speed];
1403                 mac[i].speed = SJA1105_SPEED_AUTO;
1404         }
1405
1406         /* Reset switch and send updated static configuration */
1407         rc = sja1105_static_config_upload(priv);
1408         if (rc < 0)
1409                 goto out;
1410
1411         /* Configure the CGU (PLLs) for MII and RMII PHYs.
1412          * For these interfaces, no dynamic configuration is
1413          * needed, since the PLLs have the same settings at all speeds.
1414          */
1415         rc = sja1105_clocking_setup(priv);
1416         if (rc < 0)
1417                 goto out;
1418
1419         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1420                 rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
1421                 if (rc < 0)
1422                         goto out;
1423         }
1424 out:
1425         mutex_unlock(&priv->mgmt_lock);
1426
1427         return rc;
1428 }
1429
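/* Apply a new default VLAN ID (pvid) to @port by rewriting its MAC
 * Configuration entry through the dynamic reconfiguration interface.
 */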
1430 static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
1431 {
1432         struct sja1105_mac_config_entry *mac;
1433
1434         mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1435
1436         mac[port].vlanid = pvid;
1437
1438         return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1439                                            &mac[port], true);
1440 }
1441
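/* Search the static VLAN Lookup table for @vid. Returns the index of the
 * matching entry, or -1 if the VLAN has not been configured yet.
 */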
1442 static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
1443 {
1444         struct sja1105_vlan_lookup_entry *vlan;
1445         int count, i;
1446
1447         vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
1448         count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
1449
1450         for (i = 0; i < count; i++)
1451                 if (vlan[i].vlanid == vid)
1452                         return i;
1453
1454         /* Return an invalid entry index if not found */
1455         return -1;
1456 }
1457
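/* Add or remove @port from the member and broadcast domains of @vid. The
 * static VLAN Lookup table is grown when a new VLAN is added, and the entry
 * is deleted again once its last member port is gone. The change is also
 * committed to hardware through the dynamic reconfiguration interface.
 */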
1458 static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid,
1459                               bool enabled, bool untagged)
1460 {
1461         struct sja1105_vlan_lookup_entry *vlan;
1462         struct sja1105_table *table;
1463         bool keep = true;
1464         int match, rc;
1465
1466         table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
1467
1468         match = sja1105_is_vlan_configured(priv, vid);
1469         if (match < 0) {
1470                 /* Can't delete a missing entry. */
1471                 if (!enabled)
1472                         return 0;
1473                 rc = sja1105_table_resize(table, table->entry_count + 1);
1474                 if (rc)
1475                         return rc;
1476                 match = table->entry_count - 1;
1477         }
1478         /* Assign pointer after the resize (it's new memory) */
1479         vlan = table->entries;
1480         vlan[match].vlanid = vid;
1481         if (enabled) {
1482                 vlan[match].vlan_bc |= BIT(port);
1483                 vlan[match].vmemb_port |= BIT(port);
1484         } else {
1485                 vlan[match].vlan_bc &= ~BIT(port);
1486                 vlan[match].vmemb_port &= ~BIT(port);
1487         }
1488         /* Also unset tag_port if removing this VLAN was requested,
1489          * just so we don't have a confusing bitmap (no practical purpose).
1490          */
1491         if (untagged || !enabled)
1492                 vlan[match].tag_port &= ~BIT(port);
1493         else
1494                 vlan[match].tag_port |= BIT(port);
1495         /* If there's no port left as member of this VLAN,
1496          * it's time for it to go.
1497          */
1498         if (!vlan[match].vmemb_port)
1499                 keep = false;
1500
1501         dev_dbg(priv->ds->dev,
1502                 "%s: port %d, vid %llu, broadcast domain 0x%llx, "
1503                 "port members 0x%llx, tagged ports 0x%llx, keep %d\n",
1504                 __func__, port, vlan[match].vlanid, vlan[match].vlan_bc,
1505                 vlan[match].vmemb_port, vlan[match].tag_port, keep);
1506
1507         rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
1508                                           &vlan[match], keep);
1509         if (rc < 0)
1510                 return rc;
1511
1512         if (!keep)
1513                 return sja1105_table_delete_entry(table, match);
1514
1515         return 0;
1516 }
1517
1518 static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
1519 {
1520         int rc, i;
1521
1522         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1523                 rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
1524                 if (rc < 0) {
1525                         dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
1526                                 i, rc);
1527                         return rc;
1528                 }
1529         }
1530         dev_info(ds->dev, "%s switch tagging\n",
1531                  enabled ? "Enabled" : "Disabled");
1532         return 0;
1533 }
1534
1535 static enum dsa_tag_protocol
1536 sja1105_get_tag_protocol(struct dsa_switch *ds, int port)
1537 {
1538         return DSA_TAG_PROTO_SJA1105;
1539 }
1540
1541 /* This callback needs to be present */
1542 static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
1543                                 const struct switchdev_obj_port_vlan *vlan)
1544 {
1545         return 0;
1546 }
1547
1548 /* The TPID setting belongs to the General Parameters table,
1549  * which can only be partially reconfigured at runtime (and not the TPID).
1550  * So a switch reset is required.
1551  */
1552 static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
1553 {
1554         struct sja1105_l2_lookup_params_entry *l2_lookup_params;
1555         struct sja1105_general_params_entry *general_params;
1556         struct sja1105_private *priv = ds->priv;
1557         struct sja1105_table *table;
1558         u16 tpid, tpid2;
1559         int rc;
1560
1561         if (enabled) {
1562                 /* Enable VLAN filtering. */
1563                 tpid  = ETH_P_8021Q;
1564                 tpid2 = ETH_P_8021AD;
1565         } else {
1566                 /* Disable VLAN filtering. */
1567                 tpid  = ETH_P_SJA1105;
1568                 tpid2 = ETH_P_SJA1105;
1569         }
1570
1571         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
1572         general_params = table->entries;
1573         /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
1574         general_params->tpid = tpid;
1575         /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
1576         general_params->tpid2 = tpid2;
1577         /* When VLAN filtering is on, we need to at least be able to
1578          * decode management traffic through the "backup plan".
1579          */
1580         general_params->incl_srcpt1 = enabled;
1581         general_params->incl_srcpt0 = enabled;
1582
1583         /* VLAN filtering => independent VLAN learning.
1584          * No VLAN filtering => shared VLAN learning.
1585          *
1586          * In shared VLAN learning mode, untagged traffic still gets
1587          * pvid-tagged, and the FDB table gets populated with entries
1588          * containing the "real" (pvid or from VLAN tag) VLAN ID.
1589          * However the switch performs a masked L2 lookup in the FDB,
1590          * effectively only looking up a frame's DMAC (and not VID) for the
1591          * forwarding decision.
1592          *
1593          * This is extremely convenient for us, because in modes with
1594          * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
1595          * each front panel port. This is good for identification but breaks
1596          * learning badly - the VID of the learnt FDB entry is unique, i.e.
1597          * no frames coming from any other port are going to carry it. So
1598          * for forwarding purposes, this is as though learning was broken
1599          * (all frames get flooded).
1600          */
1601         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
1602         l2_lookup_params = table->entries;
1603         l2_lookup_params->shared_learn = !enabled;
1604
1605         rc = sja1105_static_config_reload(priv);
1606         if (rc)
1607                 dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
1608
1609         /* Switch port identification based on 802.1Q is only possible
1610          * if we are not under a vlan_filtering bridge. So make sure
1611          * the two configurations are mutually exclusive.
1612          */
1613         return sja1105_setup_8021q_tagging(ds, !enabled);
1614 }
1615
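/* Called via switchdev for each VLAN in the port_vlan object, e.g. as the
 * result of a command along the lines of (port name purely illustrative):
 *   bridge vlan add dev swp2 vid 100 pvid untagged
 * where the pvid and untagged keywords map onto BRIDGE_VLAN_INFO_PVID and
 * BRIDGE_VLAN_INFO_UNTAGGED respectively.
 */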
1616 static void sja1105_vlan_add(struct dsa_switch *ds, int port,
1617                              const struct switchdev_obj_port_vlan *vlan)
1618 {
1619         struct sja1105_private *priv = ds->priv;
1620         u16 vid;
1621         int rc;
1622
1623         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1624                 rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags &
1625                                         BRIDGE_VLAN_INFO_UNTAGGED);
1626                 if (rc < 0) {
1627                         dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
1628                                 vid, port, rc);
1629                         return;
1630                 }
1631                 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1632                         rc = sja1105_pvid_apply(ds->priv, port, vid);
1633                         if (rc < 0) {
1634                                 dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n",
1635                                         vid, port, rc);
1636                                 return;
1637                         }
1638                 }
1639         }
1640 }
1641
1642 static int sja1105_vlan_del(struct dsa_switch *ds, int port,
1643                             const struct switchdev_obj_port_vlan *vlan)
1644 {
1645         struct sja1105_private *priv = ds->priv;
1646         u16 vid;
1647         int rc;
1648
1649         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1650                 rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags &
1651                                         BRIDGE_VLAN_INFO_UNTAGGED);
1652                 if (rc < 0) {
1653                         dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
1654                                 vid, port, rc);
1655                         return rc;
1656                 }
1657         }
1658         return 0;
1659 }
1660
1661 /* The programming model for the SJA1105 switch is "all-at-once" via static
1662  * configuration tables. Some of these can be dynamically modified at runtime,
1663  * but not the xMII mode parameters table.
1664  * Furthermore, some PHYs may not have crystals for generating their clocks
1665  * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
1666  * ref_clk pin. So port clocking needs to be initialized early, before
1667  * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
1668  * Setting the correct PHY link speed does not matter now.
1669  * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
1670  * bindings are not yet parsed by DSA core. We need to parse early so that we
1671  * can populate the xMII mode parameters table.
1672  */
1673 static int sja1105_setup(struct dsa_switch *ds)
1674 {
1675         struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
1676         struct sja1105_private *priv = ds->priv;
1677         int rc;
1678
1679         rc = sja1105_parse_dt(priv, ports);
1680         if (rc < 0) {
1681                 dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
1682                 return rc;
1683         }
1684
1685         /* Error out early if internal delays are required through DT
1686          * and we can't apply them.
1687          */
1688         rc = sja1105_parse_rgmii_delays(priv, ports);
1689         if (rc < 0) {
1690                 dev_err(ds->dev, "RGMII delay not supported\n");
1691                 return rc;
1692         }
1693
1694         rc = sja1105_ptp_clock_register(priv);
1695         if (rc < 0) {
1696                 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
1697                 return rc;
1698         }
1699         /* Create and send configuration down to device */
1700         rc = sja1105_static_config_load(priv, ports);
1701         if (rc < 0) {
1702                 dev_err(ds->dev, "Failed to load static config: %d\n", rc);
1703                 return rc;
1704         }
1705         /* Configure the CGU (PHY link modes and speeds) */
1706         rc = sja1105_clocking_setup(priv);
1707         if (rc < 0) {
1708                 dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
1709                 return rc;
1710         }
1711         /* On SJA1105, VLAN filtering per se is always enabled in hardware.
1712          * The only thing we can do to disable it is lie about what the 802.1Q
1713          * EtherType is.
1714          * So it will still try to apply VLAN filtering, but all ingress
1715          * traffic (except frames received with EtherType of ETH_P_SJA1105)
1716          * will be internally tagged with a distorted VLAN header where the
1717          * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
1718          */
1719         ds->vlan_filtering_is_global = true;
1720
1721         /* Advertise the 8 egress queues */
1722         ds->num_tx_queues = SJA1105_NUM_TC;
1723
1724         /* The DSA/switchdev model brings up switch ports in standalone mode by
1725          * default, and that means vlan_filtering is 0 since they're not under
1726          * a bridge, so it's safe to set up switch tagging at this time.
1727          */
1728         return sja1105_setup_8021q_tagging(ds, true);
1729 }
1730
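/* Release the resources acquired during probe and setup: tear down the TAS
 * state, flush any pending RX timestamp work, unregister the PTP clock and
 * free the static config tables.
 */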
1731 static void sja1105_teardown(struct dsa_switch *ds)
1732 {
1733         struct sja1105_private *priv = ds->priv;
1734
1735         sja1105_tas_teardown(ds);
1736         cancel_work_sync(&priv->tagger_data.rxtstamp_work);
1737         skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
1738         sja1105_ptp_clock_unregister(priv);
1739         sja1105_static_config_free(&priv->static_config);
1740 }
1741
1742 static int sja1105_port_enable(struct dsa_switch *ds, int port,
1743                                struct phy_device *phy)
1744 {
1745         struct net_device *slave;
1746
1747         if (!dsa_is_user_port(ds, port))
1748                 return 0;
1749
1750         slave = ds->ports[port].slave;
1751
1752         slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
1753
1754         return 0;
1755 }
1756
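/* Install a one-shot management route that steers the frame's DMAC towards
 * @port, hand the skb to the CPU port for transmission, then poll the route
 * until the switch clears ENFPORT, acknowledging that the frame has matched
 * it on its way out.
 */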
1757 static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
1758                              struct sk_buff *skb, bool takets)
1759 {
1760         struct sja1105_mgmt_entry mgmt_route = {0};
1761         struct sja1105_private *priv = ds->priv;
1762         struct ethhdr *hdr;
1763         int timeout = 10;
1764         int rc;
1765
1766         hdr = eth_hdr(skb);
1767
1768         mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
1769         mgmt_route.destports = BIT(port);
1770         mgmt_route.enfport = 1;
1771         mgmt_route.tsreg = 0;
1772         mgmt_route.takets = takets;
1773
1774         rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
1775                                           slot, &mgmt_route, true);
1776         if (rc < 0) {
1777                 kfree_skb(skb);
1778                 return rc;
1779         }
1780
1781         /* Transfer skb to the host port. */
1782         dsa_enqueue_skb(skb, ds->ports[port].slave);
1783
1784         /* Wait until the switch has processed the frame */
1785         do {
1786                 rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
1787                                                  slot, &mgmt_route);
1788                 if (rc < 0) {
1789                         dev_err_ratelimited(priv->ds->dev,
1790                                             "failed to poll for mgmt route\n");
1791                         continue;
1792                 }
1793
1794                 /* UM10944: The ENFPORT flag of the respective entry is
1795                  * cleared when a match is found. The host can use this
1796                  * flag as an acknowledgment.
1797                  */
1798                 cpu_relax();
1799         } while (mgmt_route.enfport && --timeout);
1800
1801         if (!timeout) {
1802                 /* Clean up the management route so that a follow-up
1803                  * frame may not match on it by mistake.
1804                  * Deleting the route is only supported in hardware on P/Q/R/S -
1805                  * on E/T it is a no-op and we silently discard the -EOPNOTSUPP.
1806                  */
1807                 sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
1808                                              slot, &mgmt_route, false);
1809                 dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
1810         }
1811
1812         return NETDEV_TX_OK;
1813 }
1814
1815 /* Deferred work is unfortunately necessary because setting up the management
1816  * route cannot be done from atomic context (SPI transfer takes a sleepable
1817  * lock on the bus)
1818  */
1819 static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
1820                                               struct sk_buff *skb)
1821 {
1822         struct sja1105_private *priv = ds->priv;
1823         struct sja1105_port *sp = &priv->ports[port];
1824         struct skb_shared_hwtstamps shwt = {0};
1825         int slot = sp->mgmt_slot;
1826         struct sk_buff *clone;
1827         u64 now, ts;
1828         int rc;
1829
1830         /* The tragic fact about the switch having 4x2 slots for installing
1831          * management routes is that all of them except one are actually
1832          * useless.
1833          * If 2 slots are simultaneously configured for two BPDUs sent to the
1834          * same (multicast) DMAC but on different egress ports, the switch
1835          * would confuse them and redirect the first frame it receives on the
1836          * CPU port towards the port configured in the numerically first slot
1837          * (therefore the wrong port), then the second received frame towards
1838          * the port in the second slot (also the wrong port).
1839          * So for all practical purposes, there needs to be a lock that
1840          * prevents that from happening. The slot used here is utterly useless
1841          * (it could just as well have simply been 0), but we are doing it
1842          * nonetheless, in case a smarter idea ever comes up in the future.
1843          */
1844         mutex_lock(&priv->mgmt_lock);
1845
1846         /* The clone, if there, was made by dsa_skb_tx_timestamp */
1847         clone = DSA_SKB_CB(skb)->clone;
1848
1849         sja1105_mgmt_xmit(ds, port, slot, skb, !!clone);
1850
1851         if (!clone)
1852                 goto out;
1853
1854         skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
1855
1856         mutex_lock(&priv->ptp_lock);
1857
1858         now = priv->tstamp_cc.read(&priv->tstamp_cc);
1859
1860         rc = sja1105_ptpegr_ts_poll(priv, slot, &ts);
1861         if (rc < 0) {
1862                 dev_err(ds->dev, "xmit: timed out polling for tstamp\n");
1863                 kfree_skb(clone);
1864                 goto out_unlock_ptp;
1865         }
1866
1867         ts = sja1105_tstamp_reconstruct(priv, now, ts);
1868         ts = timecounter_cyc2time(&priv->tstamp_tc, ts);
1869
1870         shwt.hwtstamp = ns_to_ktime(ts);
1871         skb_complete_tx_timestamp(clone, &shwt);
1872
1873 out_unlock_ptp:
1874         mutex_unlock(&priv->ptp_lock);
1875 out:
1876         mutex_unlock(&priv->mgmt_lock);
1877         return NETDEV_TX_OK;
1878 }
1879
1880 /* The MAXAGE setting belongs to the L2 Lookup Parameters table,
1881  * which cannot be reconfigured at runtime. So a switch reset is required.
1882  */
1883 static int sja1105_set_ageing_time(struct dsa_switch *ds,
1884                                    unsigned int ageing_time)
1885 {
1886         struct sja1105_l2_lookup_params_entry *l2_lookup_params;
1887         struct sja1105_private *priv = ds->priv;
1888         struct sja1105_table *table;
1889         unsigned int maxage;
1890
1891         table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
1892         l2_lookup_params = table->entries;
1893
1894         maxage = SJA1105_AGEING_TIME_MS(ageing_time);
1895
1896         if (l2_lookup_params->maxage == maxage)
1897                 return 0;
1898
1899         l2_lookup_params->maxage = maxage;
1900
1901         return sja1105_static_config_reload(priv);
1902 }
1903
1904 /* Must be called only with priv->tagger_data.state bit
1905  * SJA1105_HWTS_RX_EN cleared
1906  */
1907 static int sja1105_change_rxtstamping(struct sja1105_private *priv,
1908                                       bool on)
1909 {
1910         struct sja1105_general_params_entry *general_params;
1911         struct sja1105_table *table;
1912         int rc;
1913
1914         table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
1915         general_params = table->entries;
1916         general_params->send_meta1 = on;
1917         general_params->send_meta0 = on;
1918
1919         rc = sja1105_init_avb_params(priv, on);
1920         if (rc < 0)
1921                 return rc;
1922
1923         /* Initialize the meta state machine to a known state */
1924         if (priv->tagger_data.stampable_skb) {
1925                 kfree_skb(priv->tagger_data.stampable_skb);
1926                 priv->tagger_data.stampable_skb = NULL;
1927         }
1928
1929         return sja1105_static_config_reload(priv);
1930 }
1931
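/* Handler for the SIOCSHWTSTAMP ioctl. TX timestamping is enabled per port,
 * while RX timestamping is a switch-global setting whose change requires a
 * static config reload (see sja1105_change_rxtstamping).
 * A minimal user space sketch, for illustration only (fd is any socket, and
 * the "swp2" interface name is hypothetical; needs <linux/net_tstamp.h>,
 * <linux/sockios.h> and <net/if.h>):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
 *	};
 *	struct ifreq ifr = {0};
 *
 *	strncpy(ifr.ifr_name, "swp2", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */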
1932 static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
1933                                 struct ifreq *ifr)
1934 {
1935         struct sja1105_private *priv = ds->priv;
1936         struct hwtstamp_config config;
1937         bool rx_on;
1938         int rc;
1939
1940         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1941                 return -EFAULT;
1942
1943         switch (config.tx_type) {
1944         case HWTSTAMP_TX_OFF:
1945                 priv->ports[port].hwts_tx_en = false;
1946                 break;
1947         case HWTSTAMP_TX_ON:
1948                 priv->ports[port].hwts_tx_en = true;
1949                 break;
1950         default:
1951                 return -ERANGE;
1952         }
1953
1954         switch (config.rx_filter) {
1955         case HWTSTAMP_FILTER_NONE:
1956                 rx_on = false;
1957                 break;
1958         default:
1959                 rx_on = true;
1960                 break;
1961         }
1962
1963         if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
1964                 clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
1965
1966                 rc = sja1105_change_rxtstamping(priv, rx_on);
1967                 if (rc < 0) {
1968                         dev_err(ds->dev,
1969                                 "Failed to change RX timestamping: %d\n", rc);
1970                         return rc;
1971                 }
1972                 if (rx_on)
1973                         set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
1974         }
1975
1976         if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
1977                 return -EFAULT;
1978         return 0;
1979 }
1980
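/* Handler for the SIOCGHWTSTAMP ioctl: report the currently active
 * timestamping configuration back to user space.
 */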
1981 static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
1982                                 struct ifreq *ifr)
1983 {
1984         struct sja1105_private *priv = ds->priv;
1985         struct hwtstamp_config config;
1986
1987         config.flags = 0;
1988         if (priv->ports[port].hwts_tx_en)
1989                 config.tx_type = HWTSTAMP_TX_ON;
1990         else
1991                 config.tx_type = HWTSTAMP_TX_OFF;
1992         if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
1993                 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1994         else
1995                 config.rx_filter = HWTSTAMP_FILTER_NONE;
1996
1997         return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1998                 -EFAULT : 0;
1999 }
2000
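/* Helpers to navigate from the work_struct embedded in struct
 * sja1105_tagger_data back to the tagger data itself and to the driver
 * private structure that contains it.
 */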
2001 #define to_tagger(d) \
2002         container_of((d), struct sja1105_tagger_data, rxtstamp_work)
2003 #define to_sja1105(d) \
2004         container_of((d), struct sja1105_private, tagger_data)
2005
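/* Drain the queue of skbs deferred by sja1105_port_rxtstamp. For each one,
 * read the current PTP clock (a sleepable SPI transfer, hence the need for a
 * workqueue), reconstruct the full RX timestamp from the partial value saved
 * in SJA1105_SKB_CB(skb)->meta_tstamp, and deliver the skb up the stack.
 */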
2006 static void sja1105_rxtstamp_work(struct work_struct *work)
2007 {
2008         struct sja1105_tagger_data *data = to_tagger(work);
2009         struct sja1105_private *priv = to_sja1105(data);
2010         struct sk_buff *skb;
2011         u64 now;
2012
2013         mutex_lock(&priv->ptp_lock);
2014
2015         while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
2016                 struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
2017                 u64 ts;
2018
2019                 now = priv->tstamp_cc.read(&priv->tstamp_cc);
2020
2021                 *shwt = (struct skb_shared_hwtstamps) {0};
2022
2023                 ts = SJA1105_SKB_CB(skb)->meta_tstamp;
2024                 ts = sja1105_tstamp_reconstruct(priv, now, ts);
2025                 ts = timecounter_cyc2time(&priv->tstamp_tc, ts);
2026
2027                 shwt->hwtstamp = ns_to_ktime(ts);
2028                 netif_rx_ni(skb);
2029         }
2030
2031         mutex_unlock(&priv->ptp_lock);
2032 }
2033
2034 /* Called from dsa_skb_defer_rx_timestamp */
2035 static bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
2036                                   struct sk_buff *skb, unsigned int type)
2037 {
2038         struct sja1105_private *priv = ds->priv;
2039         struct sja1105_tagger_data *data = &priv->tagger_data;
2040
2041         if (!test_bit(SJA1105_HWTS_RX_EN, &data->state))
2042                 return false;
2043
2044         /* We need to read the full PTP clock to reconstruct the Rx
2045          * timestamp. For that we need a sleepable context.
2046          */
2047         skb_queue_tail(&data->skb_rxtstamp_queue, skb);
2048         schedule_work(&data->rxtstamp_work);
2049         return true;
2050 }
2051
2052 /* Called from dsa_skb_tx_timestamp. This callback is just to make DSA clone
2053  * the skb and have it available in DSA_SKB_CB in the .port_deferred_xmit
2054  * callback, where we will timestamp it synchronously.
2055  */
2056 static bool sja1105_port_txtstamp(struct dsa_switch *ds, int port,
2057                                   struct sk_buff *skb, unsigned int type)
2058 {
2059         struct sja1105_private *priv = ds->priv;
2060         struct sja1105_port *sp = &priv->ports[port];
2061
2062         if (!sp->hwts_tx_en)
2063                 return false;
2064
2065         return true;
2066 }
2067
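/* Offload entry point for TC qdiscs. Only the tc-taprio scheduler is
 * supported; it typically reaches this function through a command along the
 * lines of (port name and schedule purely illustrative):
 *   tc qdisc replace dev swp2 parent root taprio \
 *      num_tc 8 map 0 1 2 3 4 5 6 7 \
 *      queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
 *      base-time 0 sched-entry S 01 300000 sched-entry S fe 200000 flags 2
 */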
2068 static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
2069                                  enum tc_setup_type type,
2070                                  void *type_data)
2071 {
2072         switch (type) {
2073         case TC_SETUP_QDISC_TAPRIO:
2074                 return sja1105_setup_tc_taprio(ds, port, type_data);
2075         default:
2076                 return -EOPNOTSUPP;
2077         }
2078 }
2079
2080 static const struct dsa_switch_ops sja1105_switch_ops = {
2081         .get_tag_protocol       = sja1105_get_tag_protocol,
2082         .setup                  = sja1105_setup,
2083         .teardown               = sja1105_teardown,
2084         .set_ageing_time        = sja1105_set_ageing_time,
2085         .phylink_validate       = sja1105_phylink_validate,
2086         .phylink_mac_config     = sja1105_mac_config,
2087         .phylink_mac_link_up    = sja1105_mac_link_up,
2088         .phylink_mac_link_down  = sja1105_mac_link_down,
2089         .get_strings            = sja1105_get_strings,
2090         .get_ethtool_stats      = sja1105_get_ethtool_stats,
2091         .get_sset_count         = sja1105_get_sset_count,
2092         .get_ts_info            = sja1105_get_ts_info,
2093         .port_enable            = sja1105_port_enable,
2094         .port_fdb_dump          = sja1105_fdb_dump,
2095         .port_fdb_add           = sja1105_fdb_add,
2096         .port_fdb_del           = sja1105_fdb_del,
2097         .port_bridge_join       = sja1105_bridge_join,
2098         .port_bridge_leave      = sja1105_bridge_leave,
2099         .port_stp_state_set     = sja1105_bridge_stp_state_set,
2100         .port_vlan_prepare      = sja1105_vlan_prepare,
2101         .port_vlan_filtering    = sja1105_vlan_filtering,
2102         .port_vlan_add          = sja1105_vlan_add,
2103         .port_vlan_del          = sja1105_vlan_del,
2104         .port_mdb_prepare       = sja1105_mdb_prepare,
2105         .port_mdb_add           = sja1105_mdb_add,
2106         .port_mdb_del           = sja1105_mdb_del,
2107         .port_deferred_xmit     = sja1105_port_deferred_xmit,
2108         .port_hwtstamp_get      = sja1105_hwtstamp_get,
2109         .port_hwtstamp_set      = sja1105_hwtstamp_set,
2110         .port_rxtstamp          = sja1105_port_rxtstamp,
2111         .port_txtstamp          = sja1105_port_txtstamp,
2112         .port_setup_tc          = sja1105_port_setup_tc,
2113 };
2114
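/* Read the device ID and part number registers over SPI and verify that they
 * match the variant we were probed against via the compatible string.
 */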
2115 static int sja1105_check_device_id(struct sja1105_private *priv)
2116 {
2117         const struct sja1105_regs *regs = priv->info->regs;
2118         u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
2119         struct device *dev = &priv->spidev->dev;
2120         u64 device_id;
2121         u64 part_no;
2122         int rc;
2123
2124         rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
2125                                   &device_id, SJA1105_SIZE_DEVICE_ID);
2126         if (rc < 0)
2127                 return rc;
2128
2129         if (device_id != priv->info->device_id) {
2130                 dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n",
2131                         priv->info->device_id, device_id);
2132                 return -ENODEV;
2133         }
2134
2135         rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id,
2136                                          prod_id, SJA1105_SIZE_DEVICE_ID);
2137         if (rc < 0)
2138                 return rc;
2139
2140         sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
2141
2142         if (part_no != priv->info->part_no) {
2143                 dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n",
2144                         priv->info->part_no, part_no);
2145                 return -ENODEV;
2146         }
2147
2148         return 0;
2149 }
2150
2151 static int sja1105_probe(struct spi_device *spi)
2152 {
2153         struct sja1105_tagger_data *tagger_data;
2154         struct device *dev = &spi->dev;
2155         struct sja1105_private *priv;
2156         struct dsa_switch *ds;
2157         int rc, i;
2158
2159         if (!dev->of_node) {
2160                 dev_err(dev, "No DTS bindings for SJA1105 driver\n");
2161                 return -EINVAL;
2162         }
2163
2164         priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
2165         if (!priv)
2166                 return -ENOMEM;
2167
2168         /* Configure the optional reset pin and bring up the switch */
2169         priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
2170         if (IS_ERR(priv->reset_gpio))
2171                 dev_dbg(dev, "reset-gpios not defined, ignoring\n");
2172         else
2173                 sja1105_hw_reset(priv->reset_gpio, 1, 1);
2174
2175         /* Populate our driver private structure (priv) based on
2176          * the device tree node that was probed (spi)
2177          */
2178         priv->spidev = spi;
2179         spi_set_drvdata(spi, priv);
2180
2181         /* Configure the SPI bus */
2182         spi->bits_per_word = 8;
2183         rc = spi_setup(spi);
2184         if (rc < 0) {
2185                 dev_err(dev, "Could not init SPI\n");
2186                 return rc;
2187         }
2188
2189         priv->info = of_device_get_match_data(dev);
2190
2191         /* Detect hardware device */
2192         rc = sja1105_check_device_id(priv);
2193         if (rc < 0) {
2194                 dev_err(dev, "Device ID check failed: %d\n", rc);
2195                 return rc;
2196         }
2197
2198         dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
2199
2200         ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
2201         if (!ds)
2202                 return -ENOMEM;
2203
2204         ds->ops = &sja1105_switch_ops;
2205         ds->priv = priv;
2206         priv->ds = ds;
2207
2208         tagger_data = &priv->tagger_data;
2209         skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
2210         INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
2211         spin_lock_init(&tagger_data->meta_lock);
2212
2213         /* Connections between dsa_port and sja1105_port */
2214         for (i = 0; i < SJA1105_NUM_PORTS; i++) {
2215                 struct sja1105_port *sp = &priv->ports[i];
2216
2217                 ds->ports[i].priv = sp;
2218                 sp->dp = &ds->ports[i];
2219                 sp->data = tagger_data;
2220         }
2221         mutex_init(&priv->mgmt_lock);
2222
2223         sja1105_tas_setup(ds);
2224
2225         return dsa_register_switch(priv->ds);
2226 }
2227
2228 static int sja1105_remove(struct spi_device *spi)
2229 {
2230         struct sja1105_private *priv = spi_get_drvdata(spi);
2231
2232         dsa_unregister_switch(priv->ds);
2233         return 0;
2234 }
2235
2236 static const struct of_device_id sja1105_dt_ids[] = {
2237         { .compatible = "nxp,sja1105e", .data = &sja1105e_info },
2238         { .compatible = "nxp,sja1105t", .data = &sja1105t_info },
2239         { .compatible = "nxp,sja1105p", .data = &sja1105p_info },
2240         { .compatible = "nxp,sja1105q", .data = &sja1105q_info },
2241         { .compatible = "nxp,sja1105r", .data = &sja1105r_info },
2242         { .compatible = "nxp,sja1105s", .data = &sja1105s_info },
2243         { /* sentinel */ },
2244 };
2245 MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
2246
2247 static struct spi_driver sja1105_driver = {
2248         .driver = {
2249                 .name  = "sja1105",
2250                 .owner = THIS_MODULE,
2251                 .of_match_table = of_match_ptr(sja1105_dt_ids),
2252         },
2253         .probe  = sja1105_probe,
2254         .remove = sja1105_remove,
2255 };
2256
2257 module_spi_driver(sja1105_driver);
2258
2259 MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
2260 MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
2261 MODULE_DESCRIPTION("SJA1105 Driver");
2262 MODULE_LICENSE("GPL v2");