usb: typec: ucsi: Ensure connector delayed work items are flushed
[ Upstream commit fac4b8633fd682ecc8e9cff61cb3e33374a1c7e5 ]

During ucsi_unregister(), when a connector's workqueue is destroyed,
there may still be pending delayed work items whose timers have not yet
fired. Because queue_delayed_work() arms a separate timer to schedule a
work item, destroy_workqueue() is not aware of such pending items, and
when a pending item's timer later expires it tries to queue onto a
dangling workqueue pointer.

Fix this by keeping track of all work items on a list, so that any
pending items can be flushed before the workqueue is destroyed. Do this
by calling mod_delayed_work(), which causes pending items to be queued
immediately; the ensuing destroy_workqueue() then implicitly drains all
currently queued items to completion, allowing them to free themselves.

Fixes: b9aa02ca39a4 ("usb: typec: ucsi: Add polling mechanism for partner tasks like alt mode checking")
Suggested-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
Co-developed-by: Linyu Yuan <quic_linyyuan@quicinc.com>
Signed-off-by: Linyu Yuan <quic_linyyuan@quicinc.com>
Signed-off-by: Jack Pham <quic_jackp@quicinc.com>
Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
Link: https://lore.kernel.org/r/20230110071218.26261-1-quic_jackp@quicinc.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
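For context, the pattern applied by the patch is sketched below in plain
kernel C. This is only an illustration, not the driver's actual code:
every demo_* name is hypothetical, and only the list tracking plus the
mod_delayed_work()-before-destroy_workqueue() sequence mirrors what the
diff does to struct ucsi_work and ucsi_unregister().

/*
 * Minimal sketch (hypothetical demo_* names): delayed work items are
 * tracked on a list so that teardown can force-queue any item whose
 * timer has not fired yet before destroying the workqueue.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct workqueue_struct *wq;
	struct list_head tasks;		/* pending demo_work items */
	struct mutex lock;		/* protects tasks */
};

struct demo_work {
	struct delayed_work work;
	struct list_head node;		/* links into demo_ctx::tasks */
	struct demo_ctx *ctx;
};

static void demo_worker(struct work_struct *work)
{
	struct demo_work *dw = container_of(work, struct demo_work, work.work);
	struct demo_ctx *ctx = dw->ctx;

	/* ... do the actual deferred task here ... */

	mutex_lock(&ctx->lock);
	list_del(&dw->node);		/* item removes and frees itself */
	mutex_unlock(&ctx->lock);
	kfree(dw);
}

static int demo_queue(struct demo_ctx *ctx, unsigned long delay)
{
	struct demo_work *dw = kzalloc(sizeof(*dw), GFP_KERNEL);

	if (!dw)
		return -ENOMEM;

	dw->ctx = ctx;
	INIT_DELAYED_WORK(&dw->work, demo_worker);

	mutex_lock(&ctx->lock);
	list_add_tail(&dw->node, &ctx->tasks);
	queue_delayed_work(ctx->wq, &dw->work, delay);
	mutex_unlock(&ctx->lock);
	return 0;
}

static void demo_teardown(struct demo_ctx *ctx)
{
	struct demo_work *dw;

	mutex_lock(&ctx->lock);
	/*
	 * Re-arm every tracked item with zero delay so that items whose
	 * timer has not fired yet land on the workqueue immediately.
	 */
	list_for_each_entry(dw, &ctx->tasks, node)
		mod_delayed_work(ctx->wq, &dw->work, 0);
	mutex_unlock(&ctx->lock);

	/* Drains all queued items; each one frees itself in demo_worker(). */
	destroy_workqueue(ctx->wq);
}

Re-arming with mod_delayed_work() rather than cancelling lets the
drain in destroy_workqueue() run each handler, so the items free
themselves instead of having to be freed by hand during teardown.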
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -187,6 +187,7 @@ EXPORT_SYMBOL_GPL(ucsi_send_command);
 
 struct ucsi_work {
 	struct delayed_work work;
+	struct list_head node;
 	unsigned long delay;
 	unsigned int count;
 	struct ucsi_connector *con;
@@ -202,6 +203,7 @@ static void ucsi_poll_worker(struct work_struct *work)
 	mutex_lock(&con->lock);
 
 	if (!con->partner) {
+		list_del(&uwork->node);
 		mutex_unlock(&con->lock);
 		kfree(uwork);
 		return;
@@ -209,10 +211,12 @@ static void ucsi_poll_worker(struct work_struct *work)
 
 	ret = uwork->cb(con);
 
-	if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT))
+	if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT)) {
 		queue_delayed_work(con->wq, &uwork->work, uwork->delay);
-	else
+	} else {
+		list_del(&uwork->node);
 		kfree(uwork);
+	}
 
 	mutex_unlock(&con->lock);
 }
@@ -236,6 +240,7 @@ static int ucsi_partner_task(struct ucsi_connector *con,
 	uwork->con = con;
 	uwork->cb = cb;
 
+	list_add_tail(&uwork->node, &con->partner_tasks);
 	queue_delayed_work(con->wq, &uwork->work, delay);
 
 	return 0;
@@ -1056,6 +1061,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
 	INIT_WORK(&con->work, ucsi_handle_connector_change);
 	init_completion(&con->complete);
 	mutex_init(&con->lock);
+	INIT_LIST_HEAD(&con->partner_tasks);
 	con->num = index + 1;
 	con->ucsi = ucsi;
 
@@ -1420,8 +1426,20 @@ void ucsi_unregister(struct ucsi *ucsi)
 		ucsi_unregister_altmodes(&ucsi->connector[i],
 					 UCSI_RECIPIENT_CON);
 		ucsi_unregister_port_psy(&ucsi->connector[i]);
-		if (ucsi->connector[i].wq)
+
+		if (ucsi->connector[i].wq) {
+			struct ucsi_work *uwork;
+
+			mutex_lock(&ucsi->connector[i].lock);
+			/*
+			 * queue delayed items immediately so they can execute
+			 * and free themselves before the wq is destroyed
+			 */
+			list_for_each_entry(uwork, &ucsi->connector[i].partner_tasks, node)
+				mod_delayed_work(ucsi->connector[i].wq, &uwork->work, 0);
+			mutex_unlock(&ucsi->connector[i].lock);
 			destroy_workqueue(ucsi->connector[i].wq);
+		}
 		typec_unregister_port(ucsi->connector[i].port);
 	}
 
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -322,6 +322,7 @@ struct ucsi_connector {
 	struct work_struct work;
 	struct completion complete;
 	struct workqueue_struct *wq;
+	struct list_head partner_tasks;
 
 	struct typec_port *port;
 	struct typec_partner *partner;