can: kvaser_pciefd: Force IRQ edge in case of nested IRQ

commit 9176bd205ee0b2cd35073a9973c2a0936bcb579e upstream.

Avoid the driver missing IRQs by temporarily masking IRQs in the ISR
to enforce an edge, even if a different IRQ is signalled before the
handled IRQs are cleared.
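
The crux is the mask/unmask bracket around the ISR body. A minimal
sketch of the pattern, reusing the driver's register helpers (the
function name is made up and the handling of the individual sources
is elided; illustrative, not the literal patch):

	static irqreturn_t irq_handler_sketch(int irq, void *dev)
	{
		struct kvaser_pciefd *pcie = dev;
		const struct kvaser_pciefd_irq_mask *irq_mask =
			pcie->driver_data->irq_mask;
		u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));

		if (!(pci_irq & irq_mask->all))
			return IRQ_NONE;

		/* Mask all sources so the interrupt line is forced low. */
		iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));

		/* ... handle and clear the sources seen in pci_irq ... */

		/*
		 * Unmask: a source that is still (or newly) pending
		 * re-asserts the line, producing a fresh edge that runs
		 * the handler again instead of the IRQ being lost.
		 */
		iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
		return IRQ_HANDLED;
	}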

Fixes: 48f827d4f48f ("can: kvaser_pciefd: Move reset of DMA RX buffers to the end of the ISR")
Cc: stable@vger.kernel.org
Signed-off-by: Axel Forsman <axfo@kvaser.com>
Tested-by: Jimmy Assarsson <extja@kvaser.com>
Reviewed-by: Jimmy Assarsson <extja@kvaser.com>
Link: https://patch.msgid.link/20250520114332.8961-2-axfo@kvaser.com
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -1582,24 +1582,28 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
 	return res;
 }
 
-static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
 {
+	void __iomem *srb_cmd_reg = KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG;
 	u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
 
-	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
-		kvaser_pciefd_read_buffer(pcie, 0);
+	iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
 
-	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
+	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
+		kvaser_pciefd_read_buffer(pcie, 0);
+		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, srb_cmd_reg); /* Rearm buffer */
+	}
+	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
 		kvaser_pciefd_read_buffer(pcie, 1);
+		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, srb_cmd_reg); /* Rearm buffer */
+	}
 
 	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
 	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
 	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
 	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
 		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
 
-	iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
-	return irq;
 }
 
 static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
@@ -1627,29 +1631,22 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
 	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
 	const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
 	u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
-	u32 srb_irq = 0;
-	u32 srb_release = 0;
 	int i;
 
 	if (!(pci_irq & irq_mask->all))
 		return IRQ_NONE;
 
+	iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+
 	if (pci_irq & irq_mask->kcan_rx0)
-		srb_irq = kvaser_pciefd_receive_irq(pcie);
+		kvaser_pciefd_receive_irq(pcie);
 
 	for (i = 0; i < pcie->nr_channels; i++) {
 		if (pci_irq & irq_mask->kcan_tx[i])
 			kvaser_pciefd_transmit_irq(pcie->can[i]);
 	}
 
-	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
-		srb_release |= KVASER_PCIEFD_SRB_CMD_RDB0;
-
-	if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
-		srb_release |= KVASER_PCIEFD_SRB_CMD_RDB1;
-
-	if (srb_release)
-		iowrite32(srb_release, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+	iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
 
 	return IRQ_HANDLED;
 }
@@ -1669,13 +1666,22 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
 	}
 }
 
+static void kvaser_pciefd_disable_irq_srcs(struct kvaser_pciefd *pcie)
+{
+	unsigned int i;
+
+	/* Masking PCI_IRQ is insufficient as running ISR will unmask it */
+	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
+	for (i = 0; i < pcie->nr_channels; ++i)
+		iowrite32(0, pcie->can[i]->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+}
+
 static int kvaser_pciefd_probe(struct pci_dev *pdev,
 			       const struct pci_device_id *id)
 {
 	int err;
 	struct kvaser_pciefd *pcie;
 	const struct kvaser_pciefd_irq_mask *irq_mask;
-	void __iomem *irq_en_base;
 
 	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
 	if (!pcie)
@@ -1728,8 +1734,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
 		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG);
 	/* Enable PCI interrupts */
-	irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie);
-	iowrite32(irq_mask->all, irq_en_base);
+	iowrite32(irq_mask->all, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
 
 	/* Ready the DMA buffers */
 	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
 		  KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
@@ -1743,8 +1748,7 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
 	return 0;
 
 err_free_irq:
-	/* Disable PCI interrupts */
-	iowrite32(0, irq_en_base);
+	kvaser_pciefd_disable_irq_srcs(pcie);
 	free_irq(pcie->pci->irq, pcie);
 
 err_teardown_can_ctrls:
@@ -1764,35 +1768,25 @@ err_disable_pci:
 	return err;
 }
 
-static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
-{
-	int i;
-
-	for (i = 0; i < pcie->nr_channels; i++) {
-		struct kvaser_pciefd_can *can = pcie->can[i];
-
-		if (can) {
-			iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
-			unregister_candev(can->can.dev);
-			del_timer(&can->bec_poll_timer);
-			kvaser_pciefd_pwm_stop(can);
-			free_candev(can->can.dev);
-		}
-	}
-}
-
 static void kvaser_pciefd_remove(struct pci_dev *pdev)
 {
 	struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
+	unsigned int i;
 
-	kvaser_pciefd_remove_all_ctrls(pcie);
+	for (i = 0; i < pcie->nr_channels; ++i) {
+		struct kvaser_pciefd_can *can = pcie->can[i];
 
-	/* Disable interrupts */
-	iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
-	iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
+		unregister_candev(can->can.dev);
+		del_timer(&can->bec_poll_timer);
+		kvaser_pciefd_pwm_stop(can);
+	}
 
+	kvaser_pciefd_disable_irq_srcs(pcie);
 	free_irq(pcie->pci->irq, pcie);
 
+	for (i = 0; i < pcie->nr_channels; ++i)
+		free_candev(pcie->can[i]->can.dev);
+
 	pci_iounmap(pdev, pcie->reg_base);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);