// SPDX-License-Identifier: GPL-2.0-only
/*
 * V4L2 clock service
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <media/v4l2-clk.h>
#include <media/v4l2-subdev.h>

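/*
 * Clocks registered through v4l2_clk_register() are kept on a global list
 * protected by clk_lock; CCF-backed clocks obtained via clk_get() bypass
 * this list entirely.
 */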
static DEFINE_MUTEX(clk_lock);
static LIST_HEAD(clk_list);

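/* Look up a registered clock by device name; called with clk_lock held. */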
static struct v4l2_clk *v4l2_clk_find(const char *dev_id)
{
        struct v4l2_clk *clk;

        list_for_each_entry(clk, &clk_list, list)
                if (!strcmp(dev_id, clk->dev_id))
                        return clk;

        return ERR_PTR(-ENODEV);
}

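/*
 * Get a reference to a clock for a device: prefer a clock provided by the
 * common clock framework, otherwise fall back to the internal list,
 * matching first on dev_name() and then on the OF node name. Returns
 * -EPROBE_DEFER if the CCF lookup asks to defer probing.
 */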
struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
{
        struct v4l2_clk *clk;
        struct clk *ccf_clk = clk_get(dev, id);
        char clk_name[V4L2_CLK_NAME_SIZE];

        if (PTR_ERR(ccf_clk) == -EPROBE_DEFER)
                return ERR_PTR(-EPROBE_DEFER);

        if (!IS_ERR_OR_NULL(ccf_clk)) {
                clk = kzalloc(sizeof(*clk), GFP_KERNEL);
                if (!clk) {
                        clk_put(ccf_clk);
                        return ERR_PTR(-ENOMEM);
                }
                clk->clk = ccf_clk;

                return clk;
        }

        mutex_lock(&clk_lock);
        clk = v4l2_clk_find(dev_name(dev));

        /* if dev_name is not found, try using the OF name to find it again */
        if (PTR_ERR(clk) == -ENODEV && dev->of_node) {
                v4l2_clk_name_of(clk_name, sizeof(clk_name), dev->of_node);
                clk = v4l2_clk_find(clk_name);
        }

        if (!IS_ERR(clk))
                atomic_inc(&clk->use_count);
        mutex_unlock(&clk_lock);

        return clk;
}
EXPORT_SYMBOL(v4l2_clk_get);

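/*
 * Release a clock reference obtained with v4l2_clk_get(). CCF-backed clocks
 * are handed back to clk_put() and freed; clocks from the internal list
 * only drop their use count.
 */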
void v4l2_clk_put(struct v4l2_clk *clk)
{
        struct v4l2_clk *tmp;

        if (IS_ERR(clk))
                return;

        if (clk->clk) {
                clk_put(clk->clk);
                kfree(clk);
                return;
        }

        mutex_lock(&clk_lock);

        list_for_each_entry(tmp, &clk_list, list)
                if (tmp == clk)
                        atomic_dec(&clk->use_count);

        mutex_unlock(&clk_lock);
}
EXPORT_SYMBOL(v4l2_clk_put);

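/*
 * Pin the module implementing a listed clock so its ops cannot disappear
 * while the clock is in use; v4l2_clk_unlock_driver() drops that module
 * reference again.
 */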
static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
{
        struct v4l2_clk *tmp;
        int ret = -ENODEV;

        mutex_lock(&clk_lock);

        list_for_each_entry(tmp, &clk_list, list)
                if (tmp == clk) {
                        ret = !try_module_get(clk->ops->owner);
                        if (ret)
                                ret = -EFAULT;
                        break;
                }

        mutex_unlock(&clk_lock);

        return ret;
}

static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
{
        module_put(clk->ops->owner);
}

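/*
 * Enable a clock: CCF clocks are prepared and enabled directly, listed
 * clocks call the driver's .enable() on the first enable only, with the
 * implementing module pinned until the matching v4l2_clk_disable().
 */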
int v4l2_clk_enable(struct v4l2_clk *clk)
{
        int ret;

        if (clk->clk)
                return clk_prepare_enable(clk->clk);

        ret = v4l2_clk_lock_driver(clk);
        if (ret < 0)
                return ret;

        mutex_lock(&clk->lock);

        if (++clk->enable == 1 && clk->ops->enable) {
                ret = clk->ops->enable(clk);
                if (ret < 0)
                        clk->enable--;
        }

        mutex_unlock(&clk->lock);

        return ret;
}
EXPORT_SYMBOL(v4l2_clk_enable);

/*
 * You might Oops if you try to disable a disabled clock, because then the
 * driver isn't locked and could have been unloaded by now, so don't do that.
 */
void v4l2_clk_disable(struct v4l2_clk *clk)
{
        int enable;

        if (clk->clk)
                return clk_disable_unprepare(clk->clk);

        mutex_lock(&clk->lock);

        enable = --clk->enable;
        if (WARN(enable < 0, "Unbalanced %s() on %s!\n", __func__,
                 clk->dev_id))
                clk->enable++;
        else if (!enable && clk->ops->disable)
                clk->ops->disable(clk);

        mutex_unlock(&clk->lock);

        v4l2_clk_unlock_driver(clk);
}
EXPORT_SYMBOL(v4l2_clk_disable);

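/*
 * Return the clock rate, either from the CCF or from the driver's
 * .get_rate() callback; -ENOSYS if the callback is not implemented.
 */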
unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
{
        int ret;

        if (clk->clk)
                return clk_get_rate(clk->clk);

        ret = v4l2_clk_lock_driver(clk);
        if (ret < 0)
                return ret;

        mutex_lock(&clk->lock);
        if (!clk->ops->get_rate)
                ret = -ENOSYS;
        else
                ret = clk->ops->get_rate(clk);
        mutex_unlock(&clk->lock);

        v4l2_clk_unlock_driver(clk);

        return ret;
}
EXPORT_SYMBOL(v4l2_clk_get_rate);

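/*
 * Set a new clock rate: CCF clocks are first rounded with clk_round_rate(),
 * listed clocks go through the driver's .set_rate() callback; -ENOSYS if
 * the callback is not implemented.
 */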
int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
{
        int ret;

        if (clk->clk) {
                long r = clk_round_rate(clk->clk, rate);
                if (r < 0)
                        return r;
                return clk_set_rate(clk->clk, r);
        }

        ret = v4l2_clk_lock_driver(clk);
        if (ret < 0)
                return ret;

        mutex_lock(&clk->lock);
        if (!clk->ops->set_rate)
                ret = -ENOSYS;
        else
                ret = clk->ops->set_rate(clk, rate);
        mutex_unlock(&clk->lock);

        v4l2_clk_unlock_driver(clk);

        return ret;
}
EXPORT_SYMBOL(v4l2_clk_set_rate);

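/*
 * Register a clock for the given device name. Fails with -EEXIST if a
 * clock with the same dev_id is already on the list. Consumers look the
 * clock up again through v4l2_clk_get().
 */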
struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
                                   const char *dev_id,
                                   void *priv)
{
        struct v4l2_clk *clk;
        int ret;

        if (!ops || !dev_id)
                return ERR_PTR(-EINVAL);

        clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
        if (!clk)
                return ERR_PTR(-ENOMEM);

        clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
        if (!clk->dev_id) {
                ret = -ENOMEM;
                goto ealloc;
        }
        clk->ops = ops;
        clk->priv = priv;
        atomic_set(&clk->use_count, 0);
        mutex_init(&clk->lock);

        mutex_lock(&clk_lock);
        if (!IS_ERR(v4l2_clk_find(dev_id))) {
                mutex_unlock(&clk_lock);
                ret = -EEXIST;
                goto eexist;
        }
        list_add_tail(&clk->list, &clk_list);
        mutex_unlock(&clk_lock);

        return clk;

eexist:
ealloc:
        kfree(clk->dev_id);
        kfree(clk);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(v4l2_clk_register);

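/*
 * Remove a clock from the list and free it. Refuses, with a WARN, if the
 * clock still has users.
 */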
void v4l2_clk_unregister(struct v4l2_clk *clk)
{
        if (WARN(atomic_read(&clk->use_count),
                 "%s(): Refusing to unregister ref-counted %s clock!\n",
                 __func__, clk->dev_id))
                return;

        mutex_lock(&clk_lock);
        list_del(&clk->list);
        mutex_unlock(&clk_lock);

        kfree(clk->dev_id);
        kfree(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister);

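/*
 * Helpers for fixed-rate clocks: the private data holds the rate and an
 * ops structure whose only callback reports that rate.
 */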
struct v4l2_clk_fixed {
        unsigned long rate;
        struct v4l2_clk_ops ops;
};

static unsigned long fixed_get_rate(struct v4l2_clk *clk)
{
        struct v4l2_clk_fixed *priv = clk->priv;
        return priv->rate;
}

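/*
 * Register a fixed-rate clock for dev_id on behalf of the owning module,
 * which is pinned whenever the clock's ops are used.
 */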
struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
                                unsigned long rate, struct module *owner)
{
        struct v4l2_clk *clk;
        struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

        if (!priv)
                return ERR_PTR(-ENOMEM);

        priv->rate = rate;
        priv->ops.get_rate = fixed_get_rate;
        priv->ops.owner = owner;

        clk = v4l2_clk_register(&priv->ops, dev_id, priv);
        if (IS_ERR(clk))
                kfree(priv);

        return clk;
}
EXPORT_SYMBOL(__v4l2_clk_register_fixed);

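/* Free the fixed-rate private data and unregister the clock. */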
void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
{
        kfree(clk->priv);
        v4l2_clk_unregister(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister_fixed);