// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2018 Mellanox Technologies

#include <linux/hyperv.h>
#include "mlx5_core.h"
#include "lib/hv.h"
#include "lib/hv_vhca.h"
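/* Hyper-V VHCA channel: each registered agent owns a slot in the device's
 * Hyper-V configuration space. The host signals changes through an
 * invalidate callback; handling is deferred to a workqueue, where the
 * built-in control agent re-reads its control block, advertises the
 * capabilities of the currently registered agents and dispatches the
 * control bits to them.
 */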
struct mlx5_hv_vhca {
        struct mlx5_core_dev *dev;
        struct workqueue_struct *work_queue;
        struct mlx5_hv_vhca_agent *agents[MLX5_HV_VHCA_AGENT_MAX];
        struct mutex agents_lock; /* Protect agents array */
};
struct mlx5_hv_vhca_work {
        struct work_struct invalidate_work;
        struct mlx5_hv_vhca *hv_vhca;
        u64 block_mask;
};
struct mlx5_hv_vhca_data_block {
        u16 sequence;
        u16 offset;
        u8 reserved[4];
        u64 data[15];
};
struct mlx5_hv_vhca_agent {
        enum mlx5_hv_vhca_agent_type type;
        struct mlx5_hv_vhca *hv_vhca;
        void *priv;
        u16 seq;
        void (*control)(struct mlx5_hv_vhca_agent *agent,
                        struct mlx5_hv_vhca_control_block *block);
        void (*invalidate)(struct mlx5_hv_vhca_agent *agent,
                           u64 block_mask);
        void (*cleanup)(struct mlx5_hv_vhca_agent *agent);
};
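/* Allocate the hv_vhca context and the single-threaded workqueue used to
 * defer invalidation handling out of the Hyper-V callback path.
 */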
struct mlx5_hv_vhca *mlx5_hv_vhca_create(struct mlx5_core_dev *dev)
{
        struct mlx5_hv_vhca *hv_vhca;

        hv_vhca = kzalloc(sizeof(*hv_vhca), GFP_KERNEL);
        if (!hv_vhca)
                return ERR_PTR(-ENOMEM);

        hv_vhca->work_queue = create_singlethread_workqueue("mlx5_hv_vhca");
        if (!hv_vhca->work_queue) {
                kfree(hv_vhca);
                return ERR_PTR(-ENOMEM);
        }

        hv_vhca->dev = dev;
        mutex_init(&hv_vhca->agents_lock);
        return hv_vhca;
}
void mlx5_hv_vhca_destroy(struct mlx5_hv_vhca *hv_vhca)
{
        if (IS_ERR_OR_NULL(hv_vhca))
                return;

        destroy_workqueue(hv_vhca->work_queue);
        kfree(hv_vhca);
}
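/* Work handler: walk the agent array under agents_lock and invoke the
 * invalidate callback of every agent whose bit is set in block_mask.
 */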
static void mlx5_hv_vhca_invalidate_work(struct work_struct *work)
{
        struct mlx5_hv_vhca_work *hwork;
        struct mlx5_hv_vhca *hv_vhca;
        int i;

        hwork = container_of(work, struct mlx5_hv_vhca_work, invalidate_work);
        hv_vhca = hwork->hv_vhca;

        mutex_lock(&hv_vhca->agents_lock);
        for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
                struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];

                if (!agent || !agent->invalidate)
                        continue;
                if (!(BIT(agent->type) & hwork->block_mask))
                        continue;

                agent->invalidate(agent, hwork->block_mask);
        }
        mutex_unlock(&hv_vhca->agents_lock);

        kfree(hwork);
}
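/* Invalidate callback registered with the Hyper-V core. It uses GFP_ATOMIC
 * since it can be invoked from a context that must not sleep; the actual
 * handling is deferred to the hv_vhca workqueue.
 */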
void mlx5_hv_vhca_invalidate(void *context, u64 block_mask)
{
        struct mlx5_hv_vhca *hv_vhca = (struct mlx5_hv_vhca *)context;
        struct mlx5_hv_vhca_work *work;

        work = kzalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return;

        INIT_WORK(&work->invalidate_work, mlx5_hv_vhca_invalidate_work);
        work->hv_vhca = hv_vhca;
        work->block_mask = block_mask;

        queue_work(hv_vhca->work_queue, &work->invalidate_work);
}
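/* The control agent (type 0) has no capability bit of its own; every other
 * agent type maps to BIT(type - 1) in the control block masks.
 */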
#define AGENT_MASK(type) (type ? BIT(type - 1) : 0 /* control */)
static void mlx5_hv_vhca_agents_control(struct mlx5_hv_vhca *hv_vhca,
                                        struct mlx5_hv_vhca_control_block *block)
{
        int i;

        for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
                struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];

                if (!agent || !agent->control)
                        continue;
                if (!(AGENT_MASK(agent->type) & block->control))
                        continue;

                agent->control(agent, block);
        }
}
static void mlx5_hv_vhca_capabilities(struct mlx5_hv_vhca *hv_vhca,
                                      u32 *capabilities)
{
        int i;

        for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
                struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];

                if (agent)
                        *capabilities |= AGENT_MASK(agent->type);
        }
}
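/* Invalidate handler of the built-in control agent: re-read the control
 * block from the Hyper-V config space, advertise the capabilities of the
 * registered agents, let them act on the control bits, ack the command and
 * write the block back.
 */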
static void
mlx5_hv_vhca_control_agent_invalidate(struct mlx5_hv_vhca_agent *agent,
                                      u64 block_mask)
{
        struct mlx5_hv_vhca *hv_vhca = agent->hv_vhca;
        struct mlx5_core_dev *dev = hv_vhca->dev;
        struct mlx5_hv_vhca_control_block *block;
        u32 capabilities = 0;
        int err;

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (!block)
                return;

        err = mlx5_hv_read_config(dev, block, sizeof(*block), 0);
        if (err)
                goto free_block;

        mlx5_hv_vhca_capabilities(hv_vhca, &capabilities);

        /* In case no capabilities, send empty block in return */
        if (!capabilities) {
                memset(block, 0, sizeof(*block));
                goto write;
        }

        if (block->capabilities != capabilities)
                block->capabilities = capabilities;

        if (block->control & ~capabilities)
                goto free_block;

        mlx5_hv_vhca_agents_control(hv_vhca, block);
        block->command_ack = block->command;
write:
        mlx5_hv_write_config(dev, block, sizeof(*block), 0);
free_block:
        kfree(block);
}
static struct mlx5_hv_vhca_agent *
mlx5_hv_vhca_control_agent_create(struct mlx5_hv_vhca *hv_vhca)
{
        return mlx5_hv_vhca_agent_create(hv_vhca, MLX5_HV_VHCA_AGENT_CONTROL,
                                         NULL,
                                         mlx5_hv_vhca_control_agent_invalidate,
                                         NULL, NULL);
}
static void mlx5_hv_vhca_control_agent_destroy(struct mlx5_hv_vhca_agent *agent)
{
        mlx5_hv_vhca_agent_destroy(agent);
}
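/* Register the Hyper-V invalidate callback and create the built-in control
 * agent; mlx5_hv_vhca_cleanup() undoes both.
 */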
int mlx5_hv_vhca_init(struct mlx5_hv_vhca *hv_vhca)
{
        struct mlx5_hv_vhca_agent *agent;
        int err;

        if (IS_ERR_OR_NULL(hv_vhca))
                return IS_ERR_OR_NULL(hv_vhca);

        err = mlx5_hv_register_invalidate(hv_vhca->dev, hv_vhca,
                                          mlx5_hv_vhca_invalidate);
        if (err)
                return err;

        agent = mlx5_hv_vhca_control_agent_create(hv_vhca);
        if (IS_ERR_OR_NULL(agent)) {
                mlx5_hv_unregister_invalidate(hv_vhca->dev);
                return IS_ERR_OR_NULL(agent);
        }

        hv_vhca->agents[MLX5_HV_VHCA_AGENT_CONTROL] = agent;
        return 0;
}
void mlx5_hv_vhca_cleanup(struct mlx5_hv_vhca *hv_vhca)
{
        struct mlx5_hv_vhca_agent *agent;
        int i;

        if (IS_ERR_OR_NULL(hv_vhca))
                return;

        agent = hv_vhca->agents[MLX5_HV_VHCA_AGENT_CONTROL];
        if (agent)
                mlx5_hv_vhca_control_agent_destroy(agent);

        mutex_lock(&hv_vhca->agents_lock);
        for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++)
                WARN_ON(hv_vhca->agents[i]);
        mutex_unlock(&hv_vhca->agents_lock);

        mlx5_hv_unregister_invalidate(hv_vhca->dev);
}
static void mlx5_hv_vhca_agents_update(struct mlx5_hv_vhca *hv_vhca)
{
        mlx5_hv_vhca_invalidate(hv_vhca, BIT(MLX5_HV_VHCA_AGENT_CONTROL));
}
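/* Register an agent of the given type. Only one agent per type may exist;
 * registration kicks the control agent so that the updated capability set
 * is written back to the host.
 */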
struct mlx5_hv_vhca_agent *
mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
                          enum mlx5_hv_vhca_agent_type type,
                          void (*control)(struct mlx5_hv_vhca_agent*,
                                          struct mlx5_hv_vhca_control_block *block),
                          void (*invalidate)(struct mlx5_hv_vhca_agent*,
                                             u64 block_mask),
                          void (*cleanup)(struct mlx5_hv_vhca_agent *agent),
                          void *priv)
{
        struct mlx5_hv_vhca_agent *agent;

        if (IS_ERR_OR_NULL(hv_vhca))
                return ERR_PTR(-ENOMEM);

        if (type >= MLX5_HV_VHCA_AGENT_MAX)
                return ERR_PTR(-EINVAL);

        mutex_lock(&hv_vhca->agents_lock);
        if (hv_vhca->agents[type]) {
                mutex_unlock(&hv_vhca->agents_lock);
                return ERR_PTR(-EINVAL);
        }
        mutex_unlock(&hv_vhca->agents_lock);

        agent = kzalloc(sizeof(*agent), GFP_KERNEL);
        if (!agent)
                return ERR_PTR(-ENOMEM);

        agent->priv = priv;
        agent->hv_vhca = hv_vhca;
        agent->type = type;
        agent->control = control;
        agent->invalidate = invalidate;
        agent->cleanup = cleanup;

        mutex_lock(&hv_vhca->agents_lock);
        hv_vhca->agents[type] = agent;
        mutex_unlock(&hv_vhca->agents_lock);

        mlx5_hv_vhca_agents_update(hv_vhca);

        return agent;
}
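/* Unregister an agent: detach it from the agent array under agents_lock,
 * run its cleanup callback if any, free it and re-advertise the remaining
 * capabilities.
 */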
void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent)
{
        struct mlx5_hv_vhca *hv_vhca = agent->hv_vhca;

        mutex_lock(&hv_vhca->agents_lock);
        if (WARN_ON(agent != hv_vhca->agents[agent->type])) {
                mutex_unlock(&hv_vhca->agents_lock);
                return;
        }

        hv_vhca->agents[agent->type] = NULL;
        mutex_unlock(&hv_vhca->agents_lock);

        if (agent->cleanup)
                agent->cleanup(agent);

        kfree(agent);
        mlx5_hv_vhca_agents_update(hv_vhca);
}
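/* Copy up to one data block worth of payload from src and tag it with the
 * agent's current sequence number and the running chunk offset.
 */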
static int mlx5_hv_vhca_data_block_prepare(struct mlx5_hv_vhca_agent *agent,
                                           struct mlx5_hv_vhca_data_block *data_block,
                                           void *src, int len, int *offset)
{
        int bytes = min_t(int, (int)sizeof(data_block->data), len);

        data_block->sequence = agent->seq;
        data_block->offset = (*offset)++;
        memcpy(data_block->data, src, bytes);

        return bytes;
}
static void mlx5_hv_vhca_agent_seq_update(struct mlx5_hv_vhca_agent *agent)
{
        agent->seq++;
}
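/* Write a buffer into the agent's Hyper-V config block slot. The buffer is
 * split into data-block-sized chunks that share the current sequence number
 * and carry an increasing chunk offset; the sequence is bumped only after
 * the whole buffer has been written.
 */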
int mlx5_hv_vhca_agent_write(struct mlx5_hv_vhca_agent *agent,
                             void *buf, int len)
{
        int offset = agent->type * HV_CONFIG_BLOCK_SIZE_MAX;
        int block_offset = 0;
        int total = 0;
        int err;

        while (len) {
                struct mlx5_hv_vhca_data_block data_block = {0};
                int bytes;

                bytes = mlx5_hv_vhca_data_block_prepare(agent, &data_block,
                                                        buf + total, len,
                                                        &block_offset);
                if (!bytes)
                        return -ENOMEM;

                err = mlx5_hv_write_config(agent->hv_vhca->dev, &data_block,
                                           sizeof(data_block), offset);
                if (err)
                        return err;

                total += bytes;
                len -= bytes;
        }

        mlx5_hv_vhca_agent_seq_update(agent);
        return 0;
}
void *mlx5_hv_vhca_agent_priv(struct mlx5_hv_vhca_agent *agent)
{
        return agent->priv;
}