// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Speed Select Interface: Common functions
 * Copyright (c) 2019, Intel Corporation.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 */
10 #include <linux/cpufeature.h>
11 #include <linux/cpuhotplug.h>
13 #include <linux/hashtable.h>
14 #include <linux/miscdevice.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/sched/signal.h>
18 #include <linux/slab.h>
19 #include <linux/uaccess.h>
20 #include <uapi/linux/isst_if.h>
22 #include "isst_if_common.h"
24 #define MSR_THREAD_ID_INFO 0x53
25 #define MSR_CPU_BUS_NUMBER 0x128
27 static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];
29 static int punit_msr_white_list[] = {
30 MSR_TURBO_RATIO_LIMIT,
31 MSR_CONFIG_TDP_CONTROL,
32 MSR_TURBO_RATIO_LIMIT1,
33 MSR_TURBO_RATIO_LIMIT2,
36 struct isst_valid_cmd_ranges {
42 struct isst_cmd_set_req_type {
48 static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
55 static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
65 struct hlist_node hnode;
73 static DECLARE_HASHTABLE(isst_hash, 8);
74 static DEFINE_MUTEX(isst_hash_lock);
76 static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
79 struct isst_cmd *sst_cmd;
81 sst_cmd = kmalloc(sizeof(*sst_cmd), GFP_KERNEL);
87 sst_cmd->mbox_cmd_type = mbox_cmd_type;
88 sst_cmd->param = param;
91 hash_add(isst_hash, &sst_cmd->hnode, sst_cmd->cmd);
96 static void isst_delete_hash(void)
98 struct isst_cmd *sst_cmd;
99 struct hlist_node *tmp;
102 hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
103 hash_del(&sst_cmd->hnode);
109 * isst_store_cmd() - Store command to a hash table
110 * @cmd: Mailbox command.
111 * @sub_cmd: Mailbox sub-command or MSR id.
112 * @mbox_cmd_type: Mailbox or MSR command.
113 * @param: Mailbox parameter.
114 * @data: Mailbox request data or MSR data.
116 * Stores the command to a hash table if there is no such command already
117 * stored. If already stored update the latest parameter and data for the
120 * Return: Return result of store to hash table, 0 for success, others for
123 int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
126 struct isst_cmd *sst_cmd;
129 full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
130 full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));
131 mutex_lock(&isst_hash_lock);
132 hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
133 if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
134 sst_cmd->mbox_cmd_type == mbox_cmd_type) {
135 sst_cmd->param = param;
136 sst_cmd->data = data;
137 mutex_unlock(&isst_hash_lock);
142 ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
143 mutex_unlock(&isst_hash_lock);
147 EXPORT_SYMBOL_GPL(isst_store_cmd);
149 static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
150 struct isst_cmd *sst_cmd)
152 struct isst_if_mbox_cmd mbox_cmd;
155 mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
156 mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
157 mbox_cmd.parameter = sst_cmd->param;
158 mbox_cmd.req_data = sst_cmd->data;
159 mbox_cmd.logical_cpu = sst_cmd->cpu;
160 (cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
164 * isst_resume_common() - Process Resume request
166 * On resume replay all mailbox commands and MSRs.
170 void isst_resume_common(void)
172 struct isst_cmd *sst_cmd;
175 hash_for_each(isst_hash, i, sst_cmd, hnode) {
176 struct isst_if_cmd_cb *cb;
178 if (sst_cmd->mbox_cmd_type) {
179 cb = &punit_callbacks[ISST_IF_DEV_MBOX];
181 isst_mbox_resume_command(cb, sst_cmd);
183 wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
188 EXPORT_SYMBOL_GPL(isst_resume_common);
190 static void isst_restore_msr_local(int cpu)
192 struct isst_cmd *sst_cmd;
195 mutex_lock(&isst_hash_lock);
196 for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
197 if (!punit_msr_white_list[i])
200 hash_for_each_possible(isst_hash, sst_cmd, hnode,
201 punit_msr_white_list[i]) {
202 if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
203 wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
206 mutex_unlock(&isst_hash_lock);
210 * isst_if_mbox_cmd_invalid() - Check invalid mailbox commands
211 * @cmd: Pointer to the command structure to verify.
213 * Invalid command to PUNIT to may result in instability of the platform.
214 * This function has a whitelist of commands, which are allowed.
216 * Return: Return true if the command is invalid, else false.
218 bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
222 if (cmd->logical_cpu >= nr_cpu_ids)
225 for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
226 if (cmd->command == isst_valid_cmds[i].cmd &&
227 (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
228 cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
235 EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);
238 * isst_if_mbox_cmd_set_req() - Check mailbox command is a set request
239 * @cmd: Pointer to the command structure to verify.
241 * Check if the given mail box level is set request and not a get request.
243 * Return: Return true if the command is set_req, else false.
245 bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
249 for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
250 if (cmd->command == isst_cmd_set_reqs[i].cmd &&
251 cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
252 cmd->parameter == isst_cmd_set_reqs[i].param) {
259 EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);
261 static int isst_if_get_platform_info(void __user *argp)
263 struct isst_if_platform_info info;
265 info.api_version = ISST_IF_API_VERSION,
266 info.driver_version = ISST_IF_DRIVER_VERSION,
267 info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT,
268 info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
269 info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;
271 if (copy_to_user(argp, &info, sizeof(info)))
/* Per-logical-CPU cached bus numbers and PUNIT CPU id */
struct isst_if_cpu_info {
	/* For BUS 0 and BUS 1 only, which we need for PUNIT interface */
	int bus_info[2];
	int punit_cpu_id;	/* from MSR_THREAD_ID_INFO; -1 if unreadable */
};

/* Array of num_possible_cpus() entries; see isst_if_cpu_info_init() */
static struct isst_if_cpu_info *isst_cpu_info;
287 * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
288 * @cpu: Logical CPU number.
289 * @bus_number: The bus number assigned by the hardware.
290 * @dev: The device number assigned by the hardware.
291 * @fn: The function number assigned by the hardware.
293 * Using cached bus information, find out the PCI device for a bus number,
294 * device and function.
296 * Return: Return pci_dev pointer or NULL.
298 struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
302 if (bus_no < 0 || bus_no > 1 || cpu < 0 || cpu >= nr_cpu_ids ||
303 cpu >= num_possible_cpus())
306 bus_number = isst_cpu_info[cpu].bus_info[bus_no];
310 return pci_get_domain_bus_and_slot(0, bus_number, PCI_DEVFN(dev, fn));
312 EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
314 static int isst_if_cpu_online(unsigned int cpu)
319 ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
321 /* This is not a fatal error on MSR mailbox only I/F */
322 isst_cpu_info[cpu].bus_info[0] = -1;
323 isst_cpu_info[cpu].bus_info[1] = -1;
325 isst_cpu_info[cpu].bus_info[0] = data & 0xff;
326 isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
329 ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
331 isst_cpu_info[cpu].punit_cpu_id = -1;
334 isst_cpu_info[cpu].punit_cpu_id = data;
336 isst_restore_msr_local(cpu);
341 static int isst_if_online_id;
343 static int isst_if_cpu_info_init(void)
347 isst_cpu_info = kcalloc(num_possible_cpus(),
348 sizeof(*isst_cpu_info),
353 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
354 "platform/x86/isst-if:online",
355 isst_if_cpu_online, NULL);
357 kfree(isst_cpu_info);
361 isst_if_online_id = ret;
366 static void isst_if_cpu_info_exit(void)
368 cpuhp_remove_state(isst_if_online_id);
369 kfree(isst_cpu_info);
372 static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
374 struct isst_if_cpu_map *cpu_map;
376 cpu_map = (struct isst_if_cpu_map *)cmd_ptr;
377 if (cpu_map->logical_cpu >= nr_cpu_ids ||
378 cpu_map->logical_cpu >= num_possible_cpus())
382 cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id;
387 static bool match_punit_msr_white_list(int msr)
391 for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
392 if (punit_msr_white_list[i] == msr)
399 static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
401 struct isst_if_msr_cmd *msr_cmd;
404 msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;
406 if (!match_punit_msr_white_list(msr_cmd->msr))
409 if (msr_cmd->logical_cpu >= nr_cpu_ids)
412 if (msr_cmd->read_write) {
413 if (!capable(CAP_SYS_ADMIN))
416 ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
421 ret = isst_store_cmd(0, msr_cmd->msr,
422 msr_cmd->logical_cpu,
423 0, 0, msr_cmd->data);
427 ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
428 msr_cmd->msr, &data);
430 msr_cmd->data = data;
439 static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
441 unsigned char __user *ptr;
447 /* Each multi command has u32 command count as the first field */
448 if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
451 if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
454 cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
458 /* cb->offset points to start of the command after the command count */
459 ptr = argp + cb->offset;
461 for (i = 0; i < cmd_count; ++i) {
464 if (signal_pending(current)) {
469 if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
474 ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
478 if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
491 static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
494 void __user *argp = (void __user *)arg;
495 struct isst_if_cmd_cb cmd_cb;
496 struct isst_if_cmd_cb *cb;
500 case ISST_IF_GET_PLATFORM_INFO:
501 ret = isst_if_get_platform_info(argp);
503 case ISST_IF_GET_PHY_ID:
504 cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
505 cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
506 cmd_cb.cmd_callback = isst_if_proc_phyid_req;
507 ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
510 cb = &punit_callbacks[ISST_IF_DEV_MMIO];
512 ret = isst_if_exec_multi_cmd(argp, cb);
514 case ISST_IF_MBOX_COMMAND:
515 cb = &punit_callbacks[ISST_IF_DEV_MBOX];
517 ret = isst_if_exec_multi_cmd(argp, cb);
519 case ISST_IF_MSR_COMMAND:
520 cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
521 cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
522 cmd_cb.cmd_callback = isst_if_msr_cmd_req;
523 ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
/* Serializes misc device registration and open/release bookkeeping below */
static DEFINE_MUTEX(punit_misc_dev_lock);
static int misc_usage_count;	/* number of registered device types */
static int misc_device_ret;	/* sticky result of misc device creation */
static int misc_device_open;	/* count of user-space opens in flight */
537 static int isst_if_open(struct inode *inode, struct file *file)
541 /* Fail open, if a module is going away */
542 mutex_lock(&punit_misc_dev_lock);
543 for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
544 struct isst_if_cmd_cb *cb = &punit_callbacks[i];
546 if (cb->registered && !try_module_get(cb->owner)) {
554 for (j = 0; j < i; ++j) {
555 struct isst_if_cmd_cb *cb;
557 cb = &punit_callbacks[j];
559 module_put(cb->owner);
564 mutex_unlock(&punit_misc_dev_lock);
569 static int isst_if_relase(struct inode *inode, struct file *f)
573 mutex_lock(&punit_misc_dev_lock);
575 for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
576 struct isst_if_cmd_cb *cb = &punit_callbacks[i];
579 module_put(cb->owner);
581 mutex_unlock(&punit_misc_dev_lock);
586 static const struct file_operations isst_if_char_driver_ops = {
587 .open = isst_if_open,
588 .unlocked_ioctl = isst_if_def_ioctl,
589 .release = isst_if_relase,
592 static struct miscdevice isst_if_char_driver = {
593 .minor = MISC_DYNAMIC_MINOR,
594 .name = "isst_interface",
595 .fops = &isst_if_char_driver_ops,
599 * isst_if_cdev_register() - Register callback for IOCTL
600 * @device_type: The device type this callback handling.
601 * @cb: Callback structure.
603 * This function registers a callback to device type. On very first call
604 * it will register a misc device, which is used for user kernel interface.
605 * Other calls simply increment ref count. Registry will fail, if the user
606 * already opened misc device for operation. Also if the misc device
607 * creation failed, then it will not try again and all callers will get
610 * Return: Return the return value from the misc creation device or -EINVAL
611 * for unsupported device type.
613 int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
616 return misc_device_ret;
618 if (device_type >= ISST_IF_DEV_MAX)
621 mutex_lock(&punit_misc_dev_lock);
622 if (misc_device_open) {
623 mutex_unlock(&punit_misc_dev_lock);
626 if (!misc_usage_count) {
629 misc_device_ret = misc_register(&isst_if_char_driver);
633 ret = isst_if_cpu_info_init();
635 misc_deregister(&isst_if_char_driver);
636 misc_device_ret = ret;
640 memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
641 punit_callbacks[device_type].registered = 1;
644 mutex_unlock(&punit_misc_dev_lock);
646 return misc_device_ret;
648 EXPORT_SYMBOL_GPL(isst_if_cdev_register);
651 * isst_if_cdev_unregister() - Unregister callback for IOCTL
652 * @device_type: The device type to unregister.
654 * This function unregisters the previously registered callback. If this
655 * is the last callback unregistering, then misc device is removed.
659 void isst_if_cdev_unregister(int device_type)
661 mutex_lock(&punit_misc_dev_lock);
663 punit_callbacks[device_type].registered = 0;
664 if (device_type == ISST_IF_DEV_MBOX)
666 if (!misc_usage_count && !misc_device_ret) {
667 misc_deregister(&isst_if_char_driver);
668 isst_if_cpu_info_exit();
670 mutex_unlock(&punit_misc_dev_lock);
672 EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
674 MODULE_LICENSE("GPL v2");