diff --git a/Documentation/devicetree/bindings/dma/phytium-ddma.yaml b/Documentation/devicetree/bindings/dma/phytium-ddma.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7dbe7c44ff5477b71c34ebfef8d269f8e662e53c
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/phytium-ddma.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+title: Phytium DDMA Controller bindings
+
+description:
+  The Phytium DDMA is a general-purpose direct memory access controller
+  capable of supporting 8 independent DMA channels. Each channel can have up
+  to 32 requests. DMA clients connected to the Phytium DDMA controller must
+  use the format described in the dma.txt file, using a two-cell specifier
+  for each channel: a phandle to the DMA controller plus the following two
+  integer cells:
+  1. The channel id
+  2. The request line number
+
+maintainers:
+  - Huang Jie
+
+allOf:
+  - $ref: "dma-controller.yaml#"
+
+properties:
+  "#dma-cells":
+    const: 2
+
+  compatible:
+    const: phytium,ddma
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  dma-channels:
+    minimum: 1
+    maximum: 8
+    description: the number of DMA channels in use
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - dma-channels
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    ddma0: ddma@28003000 {
+      compatible = "phytium,ddma";
+      reg = <0x0 0x28003000 0x0 0x1000>;
+      interrupts = ;
+      #dma-cells = <2>;
+      dma-channels = <8>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/edac/phytium-e2000-edac.txt b/Documentation/devicetree/bindings/edac/phytium-e2000-edac.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6f49a13d88bdeff30a1ec6481f511bea51e841ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/edac/phytium-e2000-edac.txt
@@ -0,0 +1,23 @@
+* Phytium E2000 SoC EDAC node
+
+EDAC node is defined to describe on-chip error detection and correction.
+The following error types are supported:
+
+  SoC - SoC IPs such as Ethernet, SATA, etc.
+
+The following section describes the EDAC DT node binding.
+
+Required properties:
+- compatible: Shall be "phytium,e2000-edac".
+- reg: Shall be the E2000 RAS resource.
+- interrupts: Interrupt-specifier for RAS error IRQ(s).
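+
+Note: the matching platform driver maps three 'reg' regions, one per error
+record group (SoC, PEU PSU and PEU errors, in the order the driver's error
+tables expect), and requests every interrupt listed in 'interrupts'.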
+ +Example: + edac: edac@32b28000 { + compatible = "phytium,e2000-edac"; + reg = <0x0 0x32b28000 0x0 0x1000>, + <0x0 0x31400000 0x0 0x1000>, + <0x0 0x31401000 0x0 0x1000>; + interrupts = , + , + }; diff --git a/arch/arm64/boot/dts/phytium/pe220x.dtsi b/arch/arm64/boot/dts/phytium/pe220x.dtsi index be8346c18242974895778982c9ac6a482f7464fc..291a92d4241d241257b779b88d9ac63c6690d8df 100644 --- a/arch/arm64/boot/dts/phytium/pe220x.dtsi +++ b/arch/arm64/boot/dts/phytium/pe220x.dtsi @@ -207,6 +207,22 @@ nand0: nand@28002000 { status = "disabled"; }; + ddma0: ddma@28003000 { + compatible = "phytium,ddma"; + reg = <0x0 0x28003000 0x0 0x1000>; + interrupts = ; + #dma-cells = <2>; + dma-channels = <8>; + }; + + ddma1: ddma@28004000 { + compatible = "phytium,ddma"; + reg = <0x0 0x28004000 0x0 0x1000>; + interrupts = ; + #dma-cells = <2>; + dma-channels = <8>; + }; + qspi0: spi@28008000 { compatible = "phytium,qspi"; reg = <0x0 0x28008000 0x0 0x1000>, @@ -972,5 +988,14 @@ pcie: pcie@40000000 { status = "disabled"; }; + edac: edac@32b28000 { + compatible = "phytium,e2000-edac"; + reg = <0x0 0x32b28000 0x0 0x1000>, + <0x0 0x31400000 0x0 0x1000>, + <0x0 0x31401000 0x0 0x1000>; + interrupts = , + ; + status = "disabled"; + }; }; }; diff --git a/arch/arm64/configs/phytium_defconfig b/arch/arm64/configs/phytium_defconfig index 507a876693dc8ae2f17d68d64ed9ee8f232a3e3a..85a3f657ef0e82023cbfcba01051c7ca176b3924 100644 --- a/arch/arm64/configs/phytium_defconfig +++ b/arch/arm64/configs/phytium_defconfig @@ -572,6 +572,7 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y CONFIG_LEDS_TRIGGER_PANIC=y CONFIG_EDAC=y CONFIG_EDAC_GHES=y +CONFIG_EDAC_PHYTIUM=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_DS1307=m CONFIG_RTC_DRV_MAX77686=y @@ -591,6 +592,7 @@ CONFIG_BCM_SBA_RAID=m CONFIG_FSL_EDMA=y CONFIG_MV_XOR_V2=y CONFIG_PL330_DMA=y +CONFIG_PHYTIUM_DDMA=y CONFIG_UIO=m CONFIG_UIO_CIF=m CONFIG_UIO_PDRV_GENIRQ=m diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 08013345d1f2415f5d0af1576749087f768766ca..de736b328283a3024c6375e849c263b1b5c82372 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -710,6 +710,13 @@ config ZX_DMA help Support the DMA engine for ZTE ZX family platform devices. +config PHYTIUM_DDMA + bool "Phytium PE220x DDMA support" + depends on (ARCH_PHYTIUM || COMPILE_TEST) + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Enable support for Phytium PE220x DDMA controller. # driver files source "drivers/dma/bestcomm/Kconfig" diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 948a8da05f8b6829c498d9961b9c3e8dfc665586..e5cef402aaf68f96cc0b95bc3f4feb1bcf22e9b8 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -87,3 +87,4 @@ obj-y += mediatek/ obj-y += qcom/ obj-y += ti/ obj-y += xilinx/ +obj-y += phytium/ diff --git a/drivers/dma/phytium/Makefile b/drivers/dma/phytium/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..71ba9b9fcd4062f7841be505dde68dde679288e0 --- /dev/null +++ b/drivers/dma/phytium/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_PHYTIUM_DDMA) += phytium-ddmac.o diff --git a/drivers/dma/phytium/phytium-ddmac.c b/drivers/dma/phytium/phytium-ddmac.c new file mode 100644 index 0000000000000000000000000000000000000000..9ff66bdc96e5821fed02b3379955bdbdb67de64b --- /dev/null +++ b/drivers/dma/phytium/phytium-ddmac.c @@ -0,0 +1,945 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium Device DDMA Controller driver. + * + * Copyright (c) 2023 Phytium Technology Co., Ltd. 
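+ *
+ * The controller cannot scatter/gather on its own; each channel owns a
+ * coherent bounce buffer, and every client byte is widened to one 32-bit
+ * slot on the way to the device, then narrowed back on completion (see
+ * phytium_chan_start_xfer() and phytium_chan_xfer_done() below).
+ *
+ * A hypothetical client would reference a channel with the two-cell
+ * specifier from the DT binding (cell 1: channel id, cell 2: request
+ * line), e.g.:
+ *
+ *   dmas = <&ddma0 2 12>;
+ *   dma-names = "tx";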
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium-ddmac.h" + + +static inline struct phytium_ddma_device *to_ddma_device(struct dma_chan *chan) +{ + return container_of(chan->device, struct phytium_ddma_device, dma_dev); +} + +static inline struct phytium_ddma_chan *to_ddma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct phytium_ddma_chan, vchan.chan); +} + +static inline struct phytium_ddma_desc *to_ddma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct phytium_ddma_desc, vdesc); +} + +static inline struct device *chan_to_dev(struct phytium_ddma_chan *chan) +{ + return chan->vchan.chan.device->dev; +} + +static inline struct phytium_ddma_device *chan_to_ddma( + struct phytium_ddma_chan *chan) +{ + return to_ddma_device(&chan->vchan.chan); +} + +static inline void phytium_ddma_iowrite32( + const struct phytium_ddma_device *ddma, + const u32 reg, const u32 val) +{ + iowrite32(val, ddma->base + reg); +} + +static inline u32 phytium_ddma_ioread32(const struct phytium_ddma_device *ddma, + const u32 reg) +{ + return ioread32(ddma->base + reg); +} + +static inline void phytium_chan_iowrite32(const struct phytium_ddma_chan *chan, + const u32 reg, const u32 val) +{ + iowrite32(val, chan->base + reg); +} + +static inline u32 phytium_chan_ioread32(const struct phytium_ddma_chan *chan, + const u32 reg) +{ + return ioread32(chan->base + reg); +} + +static void phytium_ddma_disable(const struct phytium_ddma_device *ddma) +{ + dev_dbg(ddma->dev, "ddma disable\n"); + phytium_ddma_iowrite32(ddma, DMA_CTL, !DMA_CTL_EN); +} + +static void phytium_ddma_enable(const struct phytium_ddma_device *ddma) +{ + dev_dbg(ddma->dev, "ddma enable\n"); + phytium_ddma_iowrite32(ddma, DMA_CTL, DMA_CTL_EN); +} + +static void phytium_ddma_reset(const struct phytium_ddma_device *ddma) +{ + u32 val = 0; + + dev_dbg(ddma->dev, "dma reset\n"); + val = phytium_ddma_ioread32(ddma, DMA_CTL); + val |= DMA_CTL_SRST; + phytium_ddma_iowrite32(ddma, DMA_CTL, val); + + udelay(10); + val &= ~DMA_CTL_SRST; + phytium_ddma_iowrite32(ddma, DMA_CTL, val); +} + +static void phytium_ddma_irq_disable(const struct phytium_ddma_device *ddma) +{ + u32 val = 0; + + dev_dbg(ddma->dev, "ddma irq disable\n"); + val = phytium_ddma_ioread32(ddma, DMA_MASK_INT); + val |= DMA_INT_EN; + phytium_ddma_iowrite32(ddma, DMA_MASK_INT, val); +} + +static void phytium_ddma_irq_enable(const struct phytium_ddma_device *ddma) +{ + u32 val = 0; + + dev_dbg(ddma->dev, "ddma irq enable\n"); + val = phytium_ddma_ioread32(ddma, DMA_MASK_INT); + val &= ~DMA_INT_EN; + phytium_ddma_iowrite32(ddma, DMA_MASK_INT, val); +} + +static u32 phytium_ddma_irq_read(const struct phytium_ddma_device *ddma) +{ + u32 val = 0; + + val = phytium_ddma_ioread32(ddma, DMA_STAT); + + return val; +} + +static void phytium_chan_irq_disable(struct phytium_ddma_chan *chan) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d irq disable\n", chan->id); + val = phytium_ddma_ioread32(chan_to_ddma(chan), DMA_MASK_INT); + val |= DMA_INT_CHAL_EN(chan->id); + phytium_ddma_iowrite32(chan_to_ddma(chan), DMA_MASK_INT, val); +} + +static void phytium_chan_irq_enable(struct phytium_ddma_chan *chan) +{ + u32 val = 0; + + dev_dbg(chan_to_dev(chan), "channel %d irq enable\n", chan->id); + val = phytium_ddma_ioread32(chan_to_ddma(chan), DMA_MASK_INT); + val &= 
~DMA_INT_CHAL_EN(chan->id);
+	phytium_ddma_iowrite32(chan_to_ddma(chan), DMA_MASK_INT, val);
+}
+
+static void phytium_chan_irq_clear(struct phytium_ddma_chan *chan)
+{
+	u32 val = 0;
+
+	dev_dbg(chan_to_dev(chan), "channel %d irq clear\n", chan->id);
+	val = DMA_STAT_CHAL(chan->id);
+	phytium_ddma_iowrite32(chan_to_ddma(chan), DMA_STAT, val);
+}
+
+static int phytium_chan_disable(struct phytium_ddma_chan *chan)
+{
+	u32 val = 0;
+	int ret = 0;
+
+	dev_dbg(chan_to_dev(chan), "channel %d disable\n", chan->id);
+	val = phytium_chan_ioread32(chan, DMA_CHALX_CTL);
+	if (val & DMA_CHAL_EN) {
+		val &= ~DMA_CHAL_EN;
+		phytium_chan_iowrite32(chan, DMA_CHALX_CTL, val);
+
+		ret = readl_relaxed_poll_timeout_atomic(
+			chan->base + DMA_CHALX_CTL, val,
+			!(val & DMA_CHAL_EN), 0, 100000);
+	}
+	return ret;
+}
+
+static void phytium_chan_enable(struct phytium_ddma_chan *chan)
+{
+	u32 val = 0;
+
+	dev_dbg(chan_to_dev(chan), "channel %d enable\n", chan->id);
+	val = phytium_chan_ioread32(chan, DMA_CHALX_CTL);
+	val |= DMA_CHAL_EN;
+	phytium_chan_iowrite32(chan, DMA_CHALX_CTL, val);
+}
+
+static bool phytium_chan_is_running(const struct phytium_ddma_chan *chan)
+{
+	u32 val;
+
+	val = phytium_chan_ioread32(chan, DMA_CHALX_CTL);
+
+	if (val & DMA_CHAL_EN)
+		return true;
+	else
+		return false;
+}
+
+static void phytium_chan_reset(struct phytium_ddma_chan *chan)
+{
+	u32 val = 0;
+
+	dev_dbg(chan_to_dev(chan), "channel %d reset\n", chan->id);
+	val = phytium_chan_ioread32(chan, DMA_CHALX_CTL);
+	val |= DMA_CHAL_SRST;
+	phytium_chan_iowrite32(chan, DMA_CHALX_CTL, val);
+
+	udelay(10);
+	val &= ~DMA_CHAL_SRST;
+	phytium_chan_iowrite32(chan, DMA_CHALX_CTL, val);
+}
+
+static void phytium_ddma_vdesc_free(struct virt_dma_desc *vd)
+{
+	kfree(to_ddma_desc(vd));
+}
+
+static int phytium_chan_pause(struct dma_chan *chan)
+{
+	struct phytium_ddma_chan *pchan = to_ddma_chan(chan);
+	int ret = 0;
+
+	ret = phytium_chan_disable(pchan);
+	pchan->busy = false;
+	pchan->is_pasued = true;
+
+	return ret;
+}
+
+static int phytium_chan_resume(struct dma_chan *chan)
+{
+	struct phytium_ddma_chan *pchan = to_ddma_chan(chan);
+
+	phytium_chan_enable(pchan);
+	pchan->is_pasued = false;
+
+	return 0;
+}
+
+static void phytium_chan_start_xfer(struct phytium_ddma_chan *chan)
+{
+	struct virt_dma_desc *vdesc = NULL;
+	struct phytium_ddma_sg_req *sg_req = NULL;
+	char *tmp = NULL;
+	int i = 0;
+	unsigned long flags = 0;
+
+	/* chan first xfer settings */
+	if (!chan->desc) {
+		vdesc = vchan_next_desc(&chan->vchan);
+		if (!vdesc)
+			return;
+
+		list_del(&vdesc->node);
+		chan->desc = to_ddma_desc(vdesc);
+		chan->next_sg = 0;
+		chan->current_sg = NULL;
+		dev_dbg(chan_to_dev(chan), "xfer start\n");
+	}
+
+	if (chan->next_sg == chan->desc->num_sgs)
+		chan->next_sg = 0;
+
+	sg_req = &chan->desc->sg_req[chan->next_sg];
+	chan->current_sg = sg_req;
+	/* fill to 4 bytes */
+	switch (sg_req->direction) {
+	case DMA_MEM_TO_DEV:
+		tmp = phys_to_virt(sg_req->mem_addr_l);
+		memset(chan->buf, 0, sg_req->len * 4);
+		for (i = 0; i < sg_req->len; i++)
+			chan->buf[i * 4] = tmp[i];
+		break;
+
+	case DMA_DEV_TO_MEM:
+		memset(chan->buf, 0, sg_req->len * 4);
+		break;
+
+	default:
+		break;
+	}
+
+	/* start transfer */
+	phytium_chan_iowrite32(chan, DMA_CHALX_DDR_LWADDR,
+			       chan->paddr & 0xFFFFFFFF);
+	phytium_chan_iowrite32(chan, DMA_CHALX_DDR_UPADDR,
+			       (chan->paddr >> 32) & 0xFFFFFFFF);
+	phytium_chan_iowrite32(chan, DMA_CHALX_DEV_ADDR, sg_req->dev_addr);
+	phytium_chan_iowrite32(chan, DMA_CHALX_TS, sg_req->len * 4);
+
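+	/*
+	 * DMA_CHALX_TS holds the transfer size as seen by the engine: every
+	 * client byte was widened to one 32-bit slot of the bounce buffer
+	 * above, hence len * 4, with chan->paddr (the bounce buffer bus
+	 * address) programmed as the DDR side of the transfer.
+	 */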
spin_lock_irqsave(&chan_to_ddma(chan)->lock, flags); + phytium_chan_irq_enable(chan); + spin_unlock_irqrestore(&chan_to_ddma(chan)->lock, flags); + phytium_chan_enable(chan); + + chan->next_sg++; + chan->busy = true; +} + +static void phytium_chan_xfer_done(struct phytium_ddma_chan *chan) +{ + struct phytium_ddma_sg_req *sg_req = chan->current_sg; + char *tmp = NULL; + int i = 0; + + if (chan->desc) { + if (sg_req->direction == DMA_DEV_TO_MEM) { + tmp = phys_to_virt(sg_req->mem_addr_l); + for (i = 0; i < sg_req->len; i++) + tmp[i] = chan->buf[i * 4]; + } + + chan->busy = false; + if (chan->next_sg == chan->desc->num_sgs) { + dev_dbg(chan_to_dev(chan), "xfer complete\n"); + vchan_cookie_complete(&chan->desc->vdesc); + chan->desc = NULL; + chan->current_sg = NULL; + } + phytium_chan_disable(chan); + phytium_chan_irq_clear(chan); + phytium_chan_start_xfer(chan); + } +} + +static void phytium_dma_hw_init(struct phytium_ddma_device *ddma) +{ + u32 i = 0; + int ret = 0; + + phytium_ddma_disable(ddma); + phytium_ddma_reset(ddma); + phytium_ddma_irq_enable(ddma); + phytium_ddma_enable(ddma); + + for (i = 0; i < ddma->dma_channels; i++) { + phytium_chan_irq_disable(&ddma->chan[i]); + ret = phytium_chan_disable(&ddma->chan[i]); + if (ret) + dev_err(ddma->dev, "can't disable channel %d\n", i); + } +} + +static size_t phytium_ddma_desc_residue(struct phytium_ddma_chan *chan) +{ + u32 trans_cnt = 0; + u32 residue = 0; + int i = 0; + + trans_cnt = phytium_chan_ioread32(chan, DMA_CHALX_TRANS_CNT); + residue = chan->current_sg->len - trans_cnt; + + for (i = chan->next_sg; i < chan->desc->num_sgs; i++) + residue += chan->desc->sg_req[i].len; + + return residue; +} + +static enum dma_status phytium_ddma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + struct virt_dma_desc *vd = NULL; + enum dma_status ret = 0; + unsigned long flags = 0; + size_t residue = 0; + + ret = dma_cookie_status(chan, cookie, txstate); + if ((ret == DMA_COMPLETE) || !txstate) + return ret; + + spin_lock_irqsave(&pchan->vchan.lock, flags); + vd = vchan_find_desc(&pchan->vchan, cookie); + if (pchan->desc && cookie == pchan->desc->vdesc.tx.cookie) + residue = phytium_ddma_desc_residue(pchan); + + dma_set_residue(txstate, residue); + spin_unlock_irqrestore(&pchan->vchan.lock, flags); + + if (pchan->is_pasued && ret == DMA_IN_PROGRESS) + ret = DMA_PAUSED; + + return ret; +} + +static void phytium_ddma_issue_pending(struct dma_chan *chan) +{ + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + unsigned long flags = 0; + + spin_lock_irqsave(&pchan->vchan.lock, flags); + + if (vchan_issue_pending(&pchan->vchan) && !pchan->desc && !pchan->busy) + phytium_chan_start_xfer(pchan); + + spin_unlock_irqrestore(&pchan->vchan.lock, flags); +} + +static int phytium_ddma_terminate_all(struct dma_chan *chan) +{ + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + unsigned long flags = 0; + LIST_HEAD(head); + + spin_lock_irqsave(&pchan->vchan.lock, flags); + if (pchan->desc) { + vchan_terminate_vdesc(&pchan->desc->vdesc); + if (pchan->busy) { + u32 tmp_ctl, timeout; + phytium_chan_disable(pchan); + /* save some registers, reset will clear it */ + timeout = phytium_chan_ioread32(pchan, + DMA_CHALX_TIMEOUT_CNT); + tmp_ctl = phytium_chan_ioread32(pchan, + DMA_CHALX_CTL); + spin_lock(&chan_to_ddma(pchan)->lock); + phytium_chan_irq_disable(pchan); + spin_unlock(&chan_to_ddma(pchan)->lock); + /* need reset when terminate */ + phytium_chan_reset(pchan); 
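+			/*
+			 * The reset above also clears DMA_CHALX_CTL and
+			 * DMA_CHALX_TIMEOUT_CNT, which is why both registers
+			 * were saved beforehand and are restored below.
+			 */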
+ phytium_chan_irq_clear(pchan); + /* recover it */ + phytium_chan_iowrite32(pchan, + DMA_CHALX_CTL, tmp_ctl); + phytium_chan_iowrite32(pchan, + DMA_CHALX_TIMEOUT_CNT, timeout); + pchan->busy = false; + } + pchan->desc = NULL; + } + + vchan_get_all_descriptors(&pchan->vchan, &head); + spin_unlock_irqrestore(&pchan->vchan.lock, flags); + vchan_dma_desc_free_list(&pchan->vchan, &head); + + return 0; +} + +static int phytium_ddma_alloc_chan_resources(struct dma_chan *chan) +{ + struct phytium_ddma_device *ddma = to_ddma_device(chan); + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + u32 bind_status = 0; + int ret = 0; + unsigned long flags = 0; + + bind_status = phytium_ddma_ioread32(ddma, DMA_CHAL_BIND); + + if ((pchan->is_used) || (bind_status & BIT(pchan->id))) { + dev_err(ddma->dev, "channel %d already used\n", pchan->id); + ret = -EBUSY; + goto out; + } + + /* prepare channel */ + ret = phytium_chan_disable(pchan); + if (ret) { + dev_err(ddma->dev, "can't disable channel %d\n", pchan->id); + goto out; + } + phytium_chan_reset(pchan); + phytium_chan_irq_clear(pchan); + + /* channel bind */ + spin_lock_irqsave(&chan_to_ddma(pchan)->lock, flags); + bind_status |= BIT(pchan->id); + phytium_ddma_iowrite32(ddma, DMA_CHAL_BIND, bind_status); + pchan->is_used = true; + spin_unlock_irqrestore(&chan_to_ddma(pchan)->lock, flags); + + /* alloc dma memory */ + pchan->buf = dma_alloc_coherent(ddma->dev, 4 * PAGE_SIZE, &pchan->paddr, + GFP_KERNEL); + if (!pchan->buf) { + ret = -EBUSY; + dev_err(ddma->dev, "failed to alloc dma memory\n"); + } + + dev_info(ddma->dev, "alloc channel %d\n", pchan->id); + +out: + return ret; +} + +static void phytium_ddma_free_chan_resources(struct dma_chan *chan) +{ + struct phytium_ddma_device *ddma = to_ddma_device(chan); + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + u32 bind_status = 0; + unsigned long flags = 0; + + if (!pchan->is_used) + return; + + dev_dbg(ddma->dev, "free channel %d\n", pchan->id); + spin_lock_irqsave(&chan_to_ddma(pchan)->lock, flags); + bind_status = phytium_ddma_ioread32(ddma, DMA_CHAL_BIND); + bind_status &= ~BIT(pchan->id); + phytium_ddma_iowrite32(ddma, DMA_CHAL_BIND, bind_status); + spin_unlock_irqrestore(&chan_to_ddma(pchan)->lock, flags); + + phytium_chan_disable(pchan); + + spin_lock_irqsave(&chan_to_ddma(pchan)->lock, flags); + phytium_chan_irq_disable(pchan); + spin_unlock_irqrestore(&chan_to_ddma(pchan)->lock, flags); + + vchan_free_chan_resources(to_virt_chan(chan)); + pchan->is_used = false; + + if (pchan->buf) + dma_free_coherent(ddma->dev, 4 * PAGE_SIZE, + pchan->buf, pchan->paddr); +} + +static int phytium_ddma_slave_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct phytium_ddma_chan *pchan = to_ddma_chan(chan); + u32 chal_cfg = 0; + u32 req_mode = 0; + const u32 timeout = 0xffff; + unsigned long flag = 0; + + /* Check if chan will be configured for slave transfers */ + if (!is_slave_direction(config->direction)) + return -EINVAL; + + memcpy(&pchan->dma_config, config, sizeof(*config)); + + /* set channel config reg */ + spin_lock_irqsave(&chan_to_ddma(pchan)->lock, flag); + if (pchan->id > 3) { + chal_cfg = phytium_ddma_ioread32(chan_to_ddma(pchan), + DMA_CHAL_CFG_H); + chal_cfg &= ~(0xFF << ((pchan->id - 4) * 8)); + chal_cfg |= DMA_CHAL_SEL((pchan->id - 4), pchan->request_line); + chal_cfg |= DMA_CHAL_SEL_EN(pchan->id - 4); + phytium_ddma_iowrite32(chan_to_ddma(pchan), DMA_CHAL_CFG_H, + chal_cfg); + } else { + chal_cfg = phytium_ddma_ioread32(chan_to_ddma(pchan), + DMA_CHAL_CFG_L); + 
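+		/*
+		 * Channels 0-3 are configured in DMA_CHAL_CFG_L and channels
+		 * 4-7 in DMA_CHAL_CFG_H; each channel owns one 8-bit field, a
+		 * 7-bit request-line select plus a select-enable bit.
+		 */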
chal_cfg &= ~(0xFF << (pchan->id * 8));
+		chal_cfg |= DMA_CHAL_SEL((pchan->id), pchan->request_line);
+		chal_cfg |= DMA_CHAL_SEL_EN(pchan->id);
+		phytium_ddma_iowrite32(chan_to_ddma(pchan), DMA_CHAL_CFG_L,
+				       chal_cfg);
+	}
+	spin_unlock_irqrestore(&chan_to_ddma(pchan)->lock, flag);
+
+	/* set channel mode */
+	req_mode = (config->direction == DMA_DEV_TO_MEM) ?
+		DMA_RX_REQ : DMA_TX_REQ;
+	phytium_chan_iowrite32(pchan, DMA_CHALX_CTL, req_mode << 2);
+
+	/* set channel timeout */
+	phytium_chan_iowrite32(pchan, DMA_CHALX_TIMEOUT_CNT,
+			       timeout | DMA_CHAL_TIMEOUT_EN);
+
+	return 0;
+}
+
+static struct dma_async_tx_descriptor *phytium_ddma_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl,
+	u32 sg_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct phytium_ddma_device *ddma = to_ddma_device(chan);
+	struct phytium_ddma_chan *pchan = to_ddma_chan(chan);
+	struct dma_slave_config *sconfig = &pchan->dma_config;
+	struct phytium_ddma_desc *desc = NULL;
+	struct scatterlist *sg = NULL;
+	int i = 0;
+	char *tmp;
+
+	if (unlikely(!is_slave_direction(direction))) {
+		dev_err(ddma->dev, "invalid dma direction\n");
+		return NULL;
+	}
+
+	if (unlikely(sg_len < 1)) {
+		dev_err(ddma->dev, "invalid segment length: %d\n", sg_len);
+		return NULL;
+	}
+
+	desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
+	if (!desc)
+		return NULL;
+
+	/* set sg list */
+	for_each_sg(sgl, sg, sg_len, i) {
+		tmp = phys_to_virt(sg_dma_address(sg));
+		desc->sg_req[i].direction = direction;
+
+		switch (direction) {
+		case DMA_MEM_TO_DEV:
+			desc->sg_req[i].len = sg_dma_len(sg);
+			desc->sg_req[i].mem_addr_l =
+				sg_dma_address(sg) & 0xFFFFFFFF;
+			desc->sg_req[i].mem_addr_h =
+				(sg_dma_address(sg) >> 32) & 0xFFFFFFFF;
+			desc->sg_req[i].dev_addr =
+				sconfig->dst_addr & 0xFFFFFFFF;
+			break;
+
+		case DMA_DEV_TO_MEM:
+			desc->sg_req[i].len = sg_dma_len(sg);
+			desc->sg_req[i].mem_addr_l =
+				sg_dma_address(sg) & 0xFFFFFFFF;
+			desc->sg_req[i].mem_addr_h =
+				(sg_dma_address(sg) >> 32) & 0xFFFFFFFF;
+			desc->sg_req[i].dev_addr =
+				sconfig->src_addr & 0xFFFFFFFF;
+			break;
+
+		default:
+			kfree(desc);
+			return NULL;
+		}
+	}
+
+	desc->num_sgs = sg_len;
+
+	return vchan_tx_prep(&pchan->vchan, &desc->vdesc, flags);
+}
+
+static irqreturn_t phytium_dma_interrupt(int irq, void *dev_id)
+{
+	struct phytium_ddma_device *ddma = dev_id;
+	struct phytium_ddma_chan *chan;
+	u32 irq_status = 0;
+	u32 i = 0;
+	u32 val = 0;
+
+	phytium_ddma_irq_disable(ddma);
+
+	irq_status = phytium_ddma_irq_read(ddma);
+	val = phytium_ddma_ioread32(ddma, DMA_CTL);
+
+	/* Poll, clear and process every channel interrupt status */
+	for (i = 0; i < ddma->dma_channels; i++) {
+		if (!(irq_status & BIT(i * 4)))
+			continue;
+
+		chan = &ddma->chan[i];
+		phytium_chan_xfer_done(chan);
+	}
+
+	phytium_ddma_irq_enable(ddma);
+
+	return IRQ_HANDLED;
+}
+
+
+static struct dma_chan *phytium_ddma_of_xlate(struct of_phandle_args *dma_spec,
+					      struct of_dma *ofdma)
+{
+	struct phytium_ddma_device *ddma = ofdma->of_dma_data;
+	struct device *dev = ddma->dev;
+	struct phytium_ddma_chan *chan = NULL;
+	struct dma_chan *c = NULL;
+	u32 channel_id = 0;
+
+	channel_id = dma_spec->args[0];
+
+	if (channel_id >= ddma->dma_channels) {
+		dev_err(dev, "bad channel %d\n", channel_id);
+		return NULL;
+	}
+
+	chan = &ddma->chan[channel_id];
+	chan->request_line = dma_spec->args[1];
+	c = dma_get_slave_channel(&chan->vchan.chan);
+	if (!c) {
+		dev_err(dev, "no more channels available\n");
+		return NULL;
+	}
+
+	return c;
+}
+
+static int
phytium_ddma_probe(struct platform_device *pdev)
+{
+	struct phytium_ddma_device *ddma;
+	struct dma_device *dma_dev;
+	struct resource *mem;
+	u32 i = 0;
+	int ret = 0;
+	u32 nr_channels = 0;
+
+	ddma = devm_kzalloc(&pdev->dev, sizeof(*ddma), GFP_KERNEL);
+	if (!ddma) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	dma_dev = &ddma->dma_dev;
+	ddma->dev = &pdev->dev;
+
+	spin_lock_init(&ddma->lock);
+
+	ddma->irq = platform_get_irq(pdev, 0);
+	if (ddma->irq < 0) {
+		dev_err(&pdev->dev, "no irq resource\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ddma->base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(ddma->base)) {
+		dev_err(&pdev->dev, "no resource address\n");
+		ret = PTR_ERR(ddma->base);
+		goto out;
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node, "dma-channels",
+				   &nr_channels);
+	if (ret < 0) {
+		dev_err(&pdev->dev,
+			"can't get the number of dma channels: %d\n", ret);
+		goto out;
+	}
+
+	if (nr_channels > DDMA_MAX_NR_PCHANNELS) {
+		dev_warn(&pdev->dev, "over the max number of channels\n");
+		nr_channels = DDMA_MAX_NR_PCHANNELS;
+	}
+
+	ddma->dma_channels = nr_channels;
+
+	ret = devm_request_irq(&pdev->dev, ddma->irq, phytium_dma_interrupt,
+			       IRQF_SHARED, dev_name(&pdev->dev), ddma);
+	if (ret) {
+		dev_err(&pdev->dev, "could not request irq %d\n", ddma->irq);
+		goto out;
+	}
+
+	/* Set capabilities */
+	dma_cap_set(DMA_SLAVE, ddma->dma_dev.cap_mask);
+
+	/* DMA capabilities */
+	dma_dev->dev = ddma->dev;
+	dma_dev->chancnt = ddma->dma_channels;
+	dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+	/* function callback */
+	dma_dev->device_tx_status = phytium_ddma_tx_status;
+	dma_dev->device_issue_pending = phytium_ddma_issue_pending;
+	dma_dev->device_terminate_all = phytium_ddma_terminate_all;
+	dma_dev->device_alloc_chan_resources =
+		phytium_ddma_alloc_chan_resources;
+	dma_dev->device_free_chan_resources = phytium_ddma_free_chan_resources;
+	dma_dev->device_config = phytium_ddma_slave_config;
+	dma_dev->device_prep_slave_sg = phytium_ddma_prep_slave_sg;
+	dma_dev->device_pause = phytium_chan_pause;
+	dma_dev->device_resume = phytium_chan_resume;
+
+	/* init dma physical channels */
+	INIT_LIST_HEAD(&dma_dev->channels);
+	ddma->chan = devm_kcalloc(ddma->dev, ddma->dma_channels,
+				  sizeof(*ddma->chan), GFP_KERNEL);
+	if (!ddma->chan) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	for (i = 0; i < ddma->dma_channels; i++) {
+		ddma->chan[i].id = i;
+		ddma->chan[i].buf = NULL;
+		ddma->chan[i].base = ddma->base + DMA_REG_LEN +
+			i * CHAN_REG_LEN;
+		ddma->chan[i].vchan.desc_free = phytium_ddma_vdesc_free;
+		ddma->chan[i].desc = NULL;
+		ddma->chan[i].current_sg = NULL;
+		vchan_init(&ddma->chan[i].vchan, dma_dev);
+	}
+
+	phytium_dma_hw_init(ddma);
+
+	ret = dma_async_device_register(dma_dev);
+	if (ret)
+		goto out;
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+					 phytium_ddma_of_xlate, ddma);
+	if (ret < 0) {
+		dev_err(&pdev->dev,
+			"phytium ddma of register failed %d\n", ret);
+		goto err_unregister;
+	}
+
+	platform_set_drvdata(pdev, ddma);
+	dev_info(ddma->dev, "phytium DDMA Controller registered\n");
+
+	return 0;
+
+err_unregister:
+	dma_async_device_unregister(dma_dev);
+
+out:
+	return ret;
+}
+
+static void phytium_ddma_chan_remove(struct phytium_ddma_chan *chan)
+{
+	phytium_chan_irq_disable(chan);
+
phytium_chan_disable(chan); + + if (chan->buf) + dma_free_coherent(chan_to_dev(chan), 4 * PAGE_SIZE, chan->buf, + chan->paddr); + + tasklet_kill(&chan->vchan.task); + list_del(&chan->vchan.chan.device_node); +} + +static int phytium_ddma_remove(struct platform_device *pdev) +{ + struct phytium_ddma_device *ddma = platform_get_drvdata(pdev); + struct phytium_ddma_chan *chan = NULL; + int i = 0; + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&ddma->dma_dev); + + for (i = 0; i < ddma->dma_channels; i++) { + chan = &ddma->chan[i]; + phytium_ddma_chan_remove(chan); + } + + phytium_ddma_irq_disable(ddma); + phytium_ddma_disable(ddma); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int phytium_ddma_suspend(struct device *dev) +{ + struct phytium_ddma_device *ddma = dev_get_drvdata(dev); + int i = 0; + + for (i = 0; i < ddma->dma_channels; i++) { + if (phytium_chan_is_running(&ddma->chan[i])) { + dev_warn(dev, + "suspend is prevented by channel %d\n", i); + return -EBUSY; + } + } + + ddma->dma_reg.dma_chal_cfg0 = + phytium_ddma_ioread32(ddma, DMA_CHAL_CFG_L); + ddma->dma_reg.dma_chal_bind = + phytium_ddma_ioread32(ddma, DMA_CHAL_BIND); + ddma->dma_reg.dma_chal_cfg1 = + phytium_ddma_ioread32(ddma, DMA_CHAL_CFG_H); + + for (i = 0; i < ddma->dma_channels; i++) { + struct phytium_ddma_chan *chan = &ddma->chan[i]; + + if (!chan->is_used) + continue; + ddma->dma_chal_reg[i].dma_chalx_ctl = + phytium_chan_ioread32(chan, DMA_CHALX_CTL); + ddma->dma_chal_reg[i].dma_chalx_timeout_cnt = + phytium_chan_ioread32(chan, DMA_CHALX_TIMEOUT_CNT); + } + + phytium_ddma_irq_disable(ddma); + phytium_ddma_disable(ddma); + pm_runtime_force_suspend(dev); + + return 0; +} + +static int phytium_ddma_resume(struct device *dev) +{ + struct phytium_ddma_device *ddma = dev_get_drvdata(dev); + u32 i = 0; + int ret = 0; + + phytium_dma_hw_init(ddma); + phytium_ddma_iowrite32(ddma, DMA_CHAL_CFG_L, + ddma->dma_reg.dma_chal_cfg0); + phytium_ddma_iowrite32(ddma, DMA_CHAL_BIND, + ddma->dma_reg.dma_chal_bind); + phytium_ddma_iowrite32(ddma, DMA_CHAL_CFG_H, + ddma->dma_reg.dma_chal_cfg1); + + for (i = 0; i < ddma->dma_channels; i++) { + struct phytium_ddma_chan *chan = &ddma->chan[i]; + + if (!chan->is_used) + continue; + phytium_chan_iowrite32(chan, DMA_CHALX_CTL, + ddma->dma_chal_reg[i].dma_chalx_ctl); + phytium_chan_iowrite32(chan, DMA_CHALX_TIMEOUT_CNT, + ddma->dma_chal_reg[i].dma_chalx_timeout_cnt); + } + + ret = pm_runtime_force_resume(dev); + + return ret; +} +#endif + +static const struct dev_pm_ops phytium_ddma_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(phytium_ddma_suspend, + phytium_ddma_resume) +}; + +static const struct of_device_id phytium_dma_of_id_table[] = { + { .compatible = "phytium,ddma" }, + {} +}; +MODULE_DEVICE_TABLE(of, phytium_dma_of_id_table); + +static struct platform_driver phytium_driver = { + .probe = phytium_ddma_probe, + .remove = phytium_ddma_remove, + .driver = { + .name = "phytium-ddma", + .of_match_table = of_match_ptr(phytium_dma_of_id_table), + .pm = &phytium_ddma_pm_ops, + }, +}; + +module_platform_driver(phytium_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Phytium DDMA Controller platform driver"); +MODULE_AUTHOR("HuangJie "); diff --git a/drivers/dma/phytium/phytium-ddmac.h b/drivers/dma/phytium/phytium-ddmac.h new file mode 100644 index 0000000000000000000000000000000000000000..81bc0e19e64961ad83abe8ba1f3462eeca50d473 --- /dev/null +++ b/drivers/dma/phytium/phytium-ddmac.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * 
Phytium Device DDMA Controller driver. + * + * Copyright (c) 2023 Phytium Technology Co., Ltd. + */ + +#ifndef _PHYTIUM_DDMAC_H +#define _PHYTIUM_DDMAC_H + +#include +#include +#include +#include +#include +#include "../virt-dma.h" + +/* the number of physical channel */ +#define DDMA_MAX_NR_PCHANNELS 8 + +#define DMAC_MAX_MASTERS 1 +#define DMAC_MAX_BLK_SIZE PAGE_SIZE + +#define CHAN_REG_LEN 0x40 +#define DMA_REG_LEN 0x40 + +#define DMA_CTL 0x00 +#define DMA_CHAL_CFG_L 0x04 +#define DMA_CHAL_CFG_H 0x28 +#define DMA_STAT 0x08 +#define DMA_MASK_INT 0x0C +#define DMA_CHAL_BIND 0x20 +#define DMA_GCAP 0x24 + +#define DMA_CHALX_DDR_UPADDR 0x00 +#define DMA_CHALX_DDR_LWADDR 0x04 +#define DMA_CHALX_DEV_ADDR 0x08 +#define DMA_CHALX_TS 0x0C +#define DMA_CHALX_CRT_UPADDR 0x10 +#define DMA_CHALX_CRT_LWADDR 0x14 +#define DMA_CHALX_CTL 0x18 +#define DMA_CHALX_STS 0x1C +#define DMA_CHALX_TIMEOUT_CNT 0x20 +#define DMA_CHALX_TRANS_CNT 0x24 + +#define DMA_CTL_EN BIT(0) +#define DMA_CTL_SRST BIT(1) + +#define DMA_CHAL_SEL(id, x) (min_t(unsigned int, x, 0x7F) << ((id) * 8)) +#define DMA_CHAL_SEL_EN(id) BIT((id) * 8 + 7) + +#define DMA_STAT_CHAL(id) BIT((id) * 4) + +#define DMA_INT_EN BIT(31) +#define DMA_INT_CHAL_EN(id) BIT(id) + +#define DMA_CHAL_EN BIT(0) +#define DMA_CHAL_SRST BIT(1) +#define DMA_CHAL_MODE BIT(2) + +#define DMA_RX_REQ 1 +#define DMA_TX_REQ 0 + +#define DMA_CHAL_TIMEOUT_EN BIT(31) +#define DMA_CHAL_TIMEOUT_CNT(x) min_t(unsigned int, x, 0xFFFFF) + +#define DMA_TIMEOUT 10 + +/** + * struct phytium_ddma_sg_req - scatter-gatter list data info + * @len: number of bytes to transform + * @mem_addr_l: bus address low 32bit + * @mem_addr_h: bus address high 32bit + * @dev_addr: dma cousumer data reg addr + * @direction: dma transmit direction + */ +struct phytium_ddma_sg_req { + u32 len; + u32 mem_addr_l; + u32 mem_addr_h; + u32 dev_addr; + enum dma_transfer_direction direction; +}; + +/** + * struct phytium_ddma_desc - the struct holding info describing ddma request + * descriptor + * @vdesc: ddma request descriptor + * @num_sgs: the size of scatter-gatter list + * @sg_req: use to save scatter-gatter list info + */ +struct phytium_ddma_desc { + struct virt_dma_desc vdesc; + u32 num_sgs; + struct phytium_ddma_sg_req sg_req[]; +}; + +/** + * struct phytium_ddma_chan - the struct holding info describing dma channel + * @vchan: virtual dma channel + * @base: the mapped register I/O of dma physical channel + * @id: the id of ddma physical channel + * @request_line: the request line of ddma channel + * @desc: the transform request descriptor + * @dma_config: config parameters for dma channel + * @busy: the channel busy flag, this flag set when channel is tansferring + * @is_used: the channel bind flag, this flag set when channel binded + * @next_sg: the index of next scatter-gatter + * @current_sg: use to save the current transfer scatter-gatter info + * @paddr: use to align data between dma provider and consumer + */ +struct phytium_ddma_chan { + struct virt_dma_chan vchan; + void __iomem *base; + u32 id; + u32 request_line; + struct phytium_ddma_desc *desc; + struct dma_slave_config dma_config; + bool busy; + bool is_used; + bool is_pasued; + u32 next_sg; + struct phytium_ddma_sg_req *current_sg; + dma_addr_t paddr; + char *buf; +}; + +struct global_reg { + u32 dma_chal_cfg0; + u32 dma_chal_bind; + u32 dma_chal_cfg1; +}; + +struct channel_reg { + u32 dma_chalx_ctl; + u32 dma_chalx_timeout_cnt; +}; + +/** + * struct phytium_ddma_device - the struct holding info describing DDMA device + * @dma_dev: an 
instance for struct dma_device + * @irq: the irq that DDMA using + * @base: the mapped register I/O base of this DDMA + * @core_clk: DDMA clock + * @dma_channels: the number of DDMA physical channels + * @chan: the phyical channels of DDMA + * @lock: spinlock to lock when set global registers + * @dma_reg: store global register value which need recover after resume + * @dma_chal_reg: store channel register value which need recover after resume + */ +struct phytium_ddma_device { + struct dma_device dma_dev; + struct device *dev; + int irq; + void __iomem *base; + struct clk *core_clk; + u32 dma_channels; + struct phytium_ddma_chan *chan; + spinlock_t lock; + struct global_reg dma_reg; + struct channel_reg dma_chal_reg[DDMA_MAX_NR_PCHANNELS]; +}; + +#endif /* _PHYTIUM_DDMAC_H */ diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 7a47680d6f0786058201d2f02904b1211807c84b..28d639a5727c11b0b41c660af68cc98593313697 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -537,4 +537,11 @@ config EDAC_DMC520 Support for error detection and correction on the SoCs with ARM DMC-520 DRAM controller. +config EDAC_PHYTIUM + tristate "Phytium Pe220x SoC" + depends on (ARM64) + help + Support for error detection and correction on the + Phytium Pe220x family of SOCs. + endif # EDAC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 3a849168780dd1680a6e9e55e3500d276be87f2d..d259f76bd33d3694d7c71fe907dd7bf713db4ded 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -89,3 +89,4 @@ obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o +obj-$(CONFIG_EDAC_PHYTIUM) += phytium_edac.o diff --git a/drivers/edac/phytium_edac.c b/drivers/edac/phytium_edac.c new file mode 100644 index 0000000000000000000000000000000000000000..d16ac658d780b96c5632f016c6ef267b114dc19e --- /dev/null +++ b/drivers/edac/phytium_edac.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Phytium Pe220x EDAC (error detection and correction) + * + * Copyright (c) 2023 Phytium Technology Co., Ltd. 
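+ *
+ * One EDAC device ("ras"/"soc") fronts three groups of RAS error records
+ * (SoC, PEU PSU and PEU). The interrupt handler scans each group's ERR_GSR,
+ * reports the first pending record through edac_device_handle_ce()/_ue(),
+ * clears its status and rearms the corrected-error counter.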
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "edac_module.h" + +#define EDAC_MOD_STR "phytium_edac" + +/* register offset */ +#define ERR_STATUS(n) (0x10 + ((n) * 64)) +#define ERR_CTLR(n) (0x08 + ((n) * 64)) +#define ERR_MISC0(n) (0x20 + ((n) * 64)) +#define ERR_INJECT 0x7C +#define ERR_DEVID 0xFC8 +#define ERR_GSR 0xE00 + +#define CTLR_ED BIT(0) +#define CTLR_UI BIT(2) +#define CTLR_CFI BIT(8) + +#define MISC0_CEC(x) ((u64)(x) << 32) + +#define ERR_STATUS_CLEAR GENMASK(31, 0) + +#define CORRECTED_ERROR 0 +#define UNCORRECTED_ERROR 1 + +#define MAX_ERR_GROUP 3 + +struct phytium_edac { + struct device *dev; + void __iomem **ras_base; + struct dentry *dfs; + struct edac_device_ctl_info *edac_dev; +}; + +struct ras_error_info { + u32 index; + u32 error_type; + const char *error_str; +}; + +/* error severity definition */ +enum { + SEV_NO = 0x0, + SEV_CORRECTED = 0x1, + SEV_RECOVERABLE = 0x2, + SEV_PANIC = 0x3, +}; + +/* soc error record */ +static const struct ras_error_info pe220x_ras_soc_error[] = { + { 0, UNCORRECTED_ERROR, "lsd_nfc_ras_error" }, + { 1, UNCORRECTED_ERROR, "lsd_lpc_ras_long_wait_to" }, + { 2, UNCORRECTED_ERROR, "lsd_lpc_ras_short_wait_to" }, + { 3, UNCORRECTED_ERROR, "lsd_lpc_ras_sync_err" }, + { 4, UNCORRECTED_ERROR, "lsd_lbc_ras_err" }, + { 5, UNCORRECTED_ERROR, "usb3_err_0" }, + { 6, UNCORRECTED_ERROR, "usb3_err_1" }, + { 7, UNCORRECTED_ERROR, "gsd_gmu_mac0_asf_nonfatal_int" }, + { 8, UNCORRECTED_ERROR, "gsd_gmu_mac0_asf_fatal_int" }, + { 9, UNCORRECTED_ERROR, "gsd_gmu_mac0_asf_trans_to_err" }, + { 10, UNCORRECTED_ERROR, "gsd_gmu_mac0_asf_protocol_err" }, + { 11, UNCORRECTED_ERROR, "gsd_gmu_mac1_asf_nonfatal_int" }, + { 12, UNCORRECTED_ERROR, "gsd_gmu_mac1_asf_fatal_int" }, + { 13, UNCORRECTED_ERROR, "gsd_gmu_mac1_asf_trans_to_err" }, + { 14, UNCORRECTED_ERROR, "gsd_gmu_mac1_asf_protocol_err" }, + { 15, UNCORRECTED_ERROR, "gsd_gmu_mac2_asf_nonfatal_int" }, + { 16, UNCORRECTED_ERROR, "gsd_gmu_mac2_asf_fatal_int" }, + { 17, UNCORRECTED_ERROR, "gsd_gmu_mac2_asf_trans_to_err" }, + { 18, UNCORRECTED_ERROR, "gsd_gmu_mac2_asf_protocol_err" }, + { 19, UNCORRECTED_ERROR, "gsd_gmu_mac3_asf_nonfatal_int" }, + { 20, UNCORRECTED_ERROR, "gsd_gmu_mac3_asf_fatal_int" }, + { 21, UNCORRECTED_ERROR, "gsd_gmu_mac3_asf_trans_to_err" }, + { 22, UNCORRECTED_ERROR, "gsd_gmu_mac3_asf_protocol_err" }, + { 23, CORRECTED_ERROR, "dmu_ras_ecc_corrected_error" }, + { 24, UNCORRECTED_ERROR, "dmu_ras_ecc_uncorrected_error" }, + { 25, UNCORRECTED_ERROR, "cci_ras_nERRIRQ" }, + { 26, UNCORRECTED_ERROR, "smmu_tcu_ras_irpt" }, + { 27, UNCORRECTED_ERROR, "smmu_tbu0_ras_irpt" }, + { 28, UNCORRECTED_ERROR, "smmu_tbu1_ras_irpt" }, + { 29, UNCORRECTED_ERROR, "smmu_tbu2_ras_irpt" }, + { 30, UNCORRECTED_ERROR, "ocm_sram_ue" }, + { 31, CORRECTED_ERROR, "ocm_sram_ce" }, + { 32, UNCORRECTED_ERROR, "int_axim_err" }, + { 33, UNCORRECTED_ERROR, "int_fatal_error" }, + { 34, UNCORRECTED_ERROR, "nEXTERRIRQ_clust0" }, + { 35, UNCORRECTED_ERROR, "nINTERRIRQ_clust0" }, + { 36, UNCORRECTED_ERROR, "nEXTERRIRQ_clust1" }, + { 37, UNCORRECTED_ERROR, "nINTERRIRQ_clust1" }, + { 38, UNCORRECTED_ERROR, "nEXTERRIRQ_clust2" }, + { 39, UNCORRECTED_ERROR, "nINTERRIRQ_clust2" }, + { 40, UNCORRECTED_ERROR, "ams_ame0_ras_err" }, + { 41, UNCORRECTED_ERROR, "ams_ame1_ras_err" }, + { 42, UNCORRECTED_ERROR, "ams_amer_ras_err" }, + { 43, UNCORRECTED_ERROR, "ras_err_ame1" }, +}; + +/* pcie controller error record */ +static const struct ras_error_info pe220x_ras_peu_psu_error[] = { + { 0, 
CORRECTED_ERROR, "pio_rd_addr_error" }, + { 1, UNCORRECTED_ERROR, "pio_wr_addr_error" }, + { 2, CORRECTED_ERROR, "pio_rd_timeout" }, + { 3, CORRECTED_ERROR, "pio_wr_timeout" }, + { 4, CORRECTED_ERROR, "axi_b_rsp_error" }, + { 5, CORRECTED_ERROR, "axi_r_rsp_error" }, +}; + +static const struct ras_error_info pe220x_ras_peu_error[] = { + { 0, CORRECTED_ERROR, "pio_rd_addr_error" }, + { 1, UNCORRECTED_ERROR, "pio_wr_addr_error" }, + { 2, CORRECTED_ERROR, "pio_rd_timeout" }, + { 3, CORRECTED_ERROR, "pio_wr_timeout" }, + { 4, CORRECTED_ERROR, "axi_b_rsp_error" }, + { 5, CORRECTED_ERROR, "axi_r_rsp_error" }, +}; + +static const struct ras_error_info *pe220x_ras_error[] = { + pe220x_ras_soc_error, pe220x_ras_peu_psu_error, pe220x_ras_peu_error +}; + +static inline unsigned int get_error_num(const struct phytium_edac *edac, + int err_group) +{ + unsigned int error_num = 0; + + error_num = readl(edac->ras_base[err_group] + ERR_DEVID); + + return error_num; +} + +static inline void phytium_ras_setup(const struct phytium_edac *edac) +{ + u64 val = 0; + unsigned int i = 0; + /* + * enable error report and generate interrupt for corrected error event + * first error record owned by node present the node configuration + */ + for (i = 0; i < MAX_ERR_GROUP; i++) { + val = readq(edac->ras_base[i] + ERR_CTLR(0)); + val |= CTLR_ED | CTLR_UI | CTLR_CFI; + writeq(val, edac->ras_base[i] + ERR_CTLR(0)); + } +} + +static ssize_t phytium_edac_inject_ctrl_write(struct file *filp, + const char __user *buf, + size_t size, loff_t *ppos) +{ + int ret = 0; + int res = 0; + unsigned int error_group = 0; + unsigned int error_id = 0; + unsigned int error_num = 0; + struct phytium_edac *edac = filp->private_data; + char str[255]; + char *p_str = str; + char *tmp = NULL; + + if (size > 255) { + ret = -EFAULT; + goto out; + } + + if (copy_from_user(str, buf, size)) { + ret = -EFAULT; + goto out; + } else { + *ppos += size; + ret = size; + } + str[size] = '\0'; + + tmp = strsep(&p_str, ","); + if (!tmp) + goto out; + + res = kstrtouint(tmp, 0, &error_group); + if (res || error_group >= MAX_ERR_GROUP) { + dev_err(edac->dev, "invalid error group parameters"); + goto out; + } + + res = kstrtouint(p_str, 0, &error_id); + if (res) { + dev_err(edac->dev, "invalid error id parameters"); + goto out; + } + + error_num = get_error_num(edac, error_group); + if (error_id >= error_num) { + dev_err(edac->dev, "invalid ras error id.\n"); + goto out; + } + + dev_dbg(edac->dev, "inject group%d, error_id: %d\n", + error_group, error_id); + + if (pe220x_ras_error[error_group][error_id].error_type == + CORRECTED_ERROR) { + writeq(MISC0_CEC(0xFF), + edac->ras_base[error_group] + ERR_MISC0(error_id)); + } + + writel(error_id, edac->ras_base[error_group] + ERR_INJECT); + +out: + return ret; +} + +static const struct file_operations phytium_edac_debug_inject_fops[] = { + { + .open = simple_open, + .write = phytium_edac_inject_ctrl_write, + .llseek = generic_file_llseek, }, + { } +}; + +static void phytium_edac_create_debugfs_nodes(struct phytium_edac *edac) +{ + if (!IS_ENABLED(CONFIG_EDAC_DEBUG) || !edac->dfs) { + dev_info(edac->dev, "edac debug is disable"); + return; + } + + edac_debugfs_create_file("error_inject_ctrl", S_IWUSR, edac->dfs, edac, + &phytium_edac_debug_inject_fops[0]); +} + +static int phytium_edac_device_add(struct phytium_edac *edac) +{ + struct edac_device_ctl_info *edac_dev; + int res = 0; + + edac_dev = edac_device_alloc_ctl_info( + sizeof(struct edac_device_ctl_info), + "ras", 1, "soc", 1, 0, NULL, + 0, 
edac_device_alloc_index());
+	if (!edac_dev)
+		return -ENOMEM;
+
+	edac_dev->dev = edac->dev;
+	edac_dev->mod_name = EDAC_MOD_STR;
+	edac_dev->ctl_name = "phytium ras";
+	edac_dev->dev_name = "soc";
+
+	phytium_edac_create_debugfs_nodes(edac);
+
+	res = edac_device_add_device(edac_dev);
+	if (res > 0) {
+		dev_err(edac->dev, "edac_device_add_device failed\n");
+		goto err_free;
+	}
+
+	edac->edac_dev = edac_dev;
+	dev_info(edac->dev, "phytium edac device registered\n");
+	return 0;
+
+err_free:
+	edac_device_free_ctl_info(edac_dev);
+	return res;
+}
+
+static int phytium_edac_device_remove(struct phytium_edac *edac)
+{
+	struct edac_device_ctl_info *edac_dev = edac->edac_dev;
+
+	debugfs_remove_recursive(edac->dfs);
+	edac_device_del_device(edac_dev->dev);
+	edac_device_free_ctl_info(edac_dev);
+	return 0;
+}
+
+static int get_error_id(struct phytium_edac *edac, int *error_id,
+			int *error_group)
+{
+	unsigned int error_num = 0;
+	u64 error_bit = 0;
+	int ret = 0;
+	int i = 0;
+	int err_id = 0;
+
+	/* Iterate over the ras node to check error status */
+	for (i = 0; i < MAX_ERR_GROUP; i++) {
+		error_num = get_error_num(edac, i);
+		error_bit = readq(edac->ras_base[i] + ERR_GSR);
+		for (err_id = 0; err_id < error_num; err_id++) {
+			if (!(error_bit & BIT(err_id)))
+				continue;
+			else
+				break;
+		}
+		if (err_id < error_num) {
+			*error_id = err_id;
+			*error_group = i;
+			break;
+		}
+	}
+
+	if (i >= MAX_ERR_GROUP) {
+		ret = -1;
+		dev_warn(edac->dev, "no error detected\n");
+	}
+
+	return ret;
+}
+
+static void phytium_edac_error_report(struct phytium_edac *edac,
+				      const int error_id,
+				      const int error_group)
+{
+	const struct ras_error_info *err_info =
+		pe220x_ras_error[error_group];
+
+	if (err_info[error_id].error_type == UNCORRECTED_ERROR) {
+		edac_printk(KERN_CRIT, EDAC_MOD_STR, "uncorrected error: %s\n",
+			    err_info[error_id].error_str);
+		edac_device_handle_ue(edac->edac_dev, 0, 0,
+				      err_info[error_id].error_str);
+		/* Report the error via the trace interface */
+		if (IS_ENABLED(CONFIG_RAS))
+			trace_non_standard_event(&NULL_UUID_LE, &NULL_UUID_LE,
+					EDAC_MOD_STR, SEV_RECOVERABLE,
+					err_info[error_id].error_str,
+					strlen(err_info[error_id].error_str));
+	} else {
+		edac_printk(KERN_CRIT, EDAC_MOD_STR, "corrected error: %s\n",
+			    err_info[error_id].error_str);
+		edac_device_handle_ce(edac->edac_dev, 0, 0,
+				      err_info[error_id].error_str);
+		/* Report the error via the trace interface */
+		if (IS_ENABLED(CONFIG_RAS))
+			trace_non_standard_event(&NULL_UUID_LE, &NULL_UUID_LE,
+					EDAC_MOD_STR, SEV_CORRECTED,
+					err_info[error_id].error_str,
+					strlen(err_info[error_id].error_str));
+	}
+}
+
+/*
+ * Clear the error status and set the corrected error counter to 0xFE so
+ * that the next corrected error event triggers an interrupt.
+ */
+static void phytium_edac_clear_error_status(struct phytium_edac *edac,
+					    const int error_id,
+					    const int error_group)
+{
+	writeq(MISC0_CEC(0xFE), edac->ras_base[error_group] +
+	       ERR_MISC0(error_id));
+	writeq(ERR_STATUS_CLEAR, edac->ras_base[error_group] +
+	       ERR_STATUS(error_id));
+}
+
+static irqreturn_t phytium_edac_isr(int irq, void *dev_id)
+{
+	struct phytium_edac *edac = dev_id;
+	int ret = 0;
+	int error_group;
+	int error_id;
+
+	ret = get_error_id(edac, &error_id, &error_group);
+	if (ret < 0)
+		goto out;
+
+	phytium_edac_error_report(edac, error_id, error_group);
+	phytium_edac_clear_error_status(edac, error_id, error_group);
+
+out:
+	return IRQ_HANDLED;
+}
+
+static int phytium_edac_probe(struct platform_device *pdev)
+{
+	struct phytium_edac *edac;
+	struct resource *res;
+	int ret = 0;
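+	/*
+	 * With CONFIG_EDAC_DEBUG, an error record can be injected from user
+	 * space through the debugfs node created during probe, e.g.
+	 * (assuming debugfs is mounted in the usual place):
+	 *
+	 *   echo 0,31 > /sys/kernel/debug/phytium_edac/error_inject_ctrl
+	 *
+	 * which injects SoC-group record 31, the corrected "ocm_sram_ce"
+	 * error.
+	 */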
+ int irq_cnt = 0; + int irq = 0; + int i = 0; + + edac = devm_kzalloc(&pdev->dev, sizeof(*edac), GFP_KERNEL); + if (!edac) { + ret = -ENOMEM; + goto out; + } + + edac->dev = &pdev->dev; + platform_set_drvdata(pdev, edac); + + edac->ras_base = devm_kcalloc(&pdev->dev, 3, + sizeof(*edac->ras_base), GFP_KERNEL); + if (!edac->ras_base) { + return -ENOMEM; + goto out; + } + + for (i = 0; i < MAX_ERR_GROUP; i++) { + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + edac->ras_base[i] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(edac->ras_base[i])) { + dev_err(&pdev->dev, "no resource address\n"); + ret = PTR_ERR(edac->ras_base[i]); + goto out; + } + } + + edac->dfs = edac_debugfs_create_dir(EDAC_MOD_STR); + + ret = phytium_edac_device_add(edac); + if (ret) { + dev_err(&pdev->dev, "can't add edac device"); + goto out; + } + + phytium_ras_setup(edac); + + irq_cnt = platform_irq_count(pdev); + if (irq_cnt < 0) { + dev_err(&pdev->dev, "no irq resource\n"); + ret = -EINVAL; + goto out; + } + + for (i = 0; i < irq_cnt; i++) { + irq = platform_get_irq(pdev, i); + if (irq < 0) { + dev_err(&pdev->dev, "invalid irq resource\n"); + ret = -EINVAL; + goto out; + } + ret = devm_request_irq(&pdev->dev, irq, + phytium_edac_isr, IRQF_SHARED, + EDAC_MOD_STR, edac); + if (ret) { + dev_err(&pdev->dev, + "could not request irq %d\n", irq); + goto out; + } + } + +out: + return ret; +} + +static int phytium_edac_remove(struct platform_device *pdev) +{ + struct phytium_edac *edac = dev_get_drvdata(&pdev->dev); + + phytium_edac_device_remove(edac); + + return 0; +} + +static const struct of_device_id phytium_edac_of_match[] = { + { .compatible = "phytium,pe220x-edac" }, + {}, +}; +MODULE_DEVICE_TABLE(of, phytium_edac_of_match); + +static struct platform_driver phytium_edac_driver = { + .probe = phytium_edac_probe, + .remove = phytium_edac_remove, + .driver = { + .name = "phytium-edac", + .of_match_table = phytium_edac_of_match, + }, +}; + +module_platform_driver(phytium_edac_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Huangjie "); +MODULE_DESCRIPTION("Phytium Pe220x EDAC driver"); diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 5c7b2443eed41a792a0b353444d7daf31f760224..ccc12c8ac4ec4b411f1d6c4682551b32b26280ee 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -85,6 +85,7 @@ obj-$(CONFIG_SPI_ORION) += spi-orion.o obj-$(CONFIG_SPI_PHYTIUM) += spi-phytium.o obj-$(CONFIG_SPI_PHYTIUM_PLAT) += spi-phytium-plat.o obj-$(CONFIG_SPI_PHYTIUM_PCI) += spi-phytium-pci.o +obj-$(CONFIG_SPI_PHYTIUM) += spi-phytium-dma.o obj-$(CONFIG_SPI_PIC32) += spi-pic32.o obj-$(CONFIG_SPI_PIC32_SQI) += spi-pic32-sqi.o obj-$(CONFIG_SPI_PL022) += spi-pl022.o diff --git a/drivers/spi/spi-phytium-dma.c b/drivers/spi/spi-phytium-dma.c new file mode 100644 index 0000000000000000000000000000000000000000..d6b14e1831228188eccadd9afe90f70e3403c228 --- /dev/null +++ b/drivers/spi/spi-phytium-dma.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Special handling for phytium DMA core + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "spi-phytium.h" + +#define RX_BUSY 0 +#define RX_BURST_LEVEL 16 +#define TX_BUSY 1 +#define TX_BURST_LEVEL 16 + +#define DMA_MAX_BUF_SIZE 4096 + +static void phytium_spi_dma_maxburst_init(struct phytium_spi *fts) +{ + struct dma_slave_caps caps; + u32 max_burst, def_burst; + int ret; + + def_burst = fts->fifo_len / 2; + + ret = dma_get_slave_caps(fts->rxchan, &caps); + if (!ret && caps.max_burst) + max_burst = 
caps.max_burst; + else + max_burst = RX_BURST_LEVEL; + + fts->rxburst = min(max_burst, def_burst); + phytium_writel(fts, DMARDLR, 0x0); + + ret = dma_get_slave_caps(fts->txchan, &caps); + if (!ret && caps.max_burst) + max_burst = caps.max_burst; + else + max_burst = TX_BURST_LEVEL; + + /* + * Having a Rx DMA channel serviced with higher priority than a Tx DMA + * channel might not be enough to provide a well balanced DMA-based + * SPI transfer interface. There might still be moments when the Tx DMA + * channel is occasionally handled faster than the Rx DMA channel. + * That in its turn will eventually cause the SPI Rx FIFO overflow if + * SPI bus speed is high enough to fill the SPI Rx FIFO in before it's + * cleared by the Rx DMA channel. In order to fix the problem the Tx + * DMA activity is intentionally slowed down by limiting the SPI Tx + * FIFO depth with a value twice bigger than the Tx burst length. + */ + fts->txburst = min(max_burst, def_burst); + /* set dmatdlr to 0 + 1 */ + phytium_writel(fts, DMATDLR, 0); +} + +static void phytium_spi_dma_sg_burst_init(struct phytium_spi *fts) +{ + struct dma_slave_caps tx = {0}, rx = {0}; + + dma_get_slave_caps(fts->txchan, &tx); + dma_get_slave_caps(fts->rxchan, &rx); + + if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0) + fts->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst); + else if (tx.max_sg_burst > 0) + fts->dma_sg_burst = tx.max_sg_burst; + else if (rx.max_sg_burst > 0) + fts->dma_sg_burst = rx.max_sg_burst; + else + fts->dma_sg_burst = 0; +} + +static int phytium_spi_dma_init(struct device *dev, struct phytium_spi *fts) +{ + fts->rxchan = dma_request_chan(dev, "rx"); + if (IS_ERR_OR_NULL(fts->rxchan)) + return -ENODEV; + + fts->txchan = dma_request_chan(dev, "tx"); + if (IS_ERR_OR_NULL(fts->txchan)) { + dev_err(dev, "can't request chan\n"); + dma_release_channel(fts->rxchan); + fts->rxchan = NULL; + return -ENODEV; + } + + fts->master->dma_rx = fts->rxchan; + fts->master->dma_tx = fts->txchan; + init_completion(&fts->dma_completion); + + phytium_spi_dma_maxburst_init(fts); + phytium_spi_dma_sg_burst_init(fts); + + return 0; +} + +static void phytium_spi_dma_exit(struct phytium_spi *fts) +{ + if (fts->txchan) { + dmaengine_terminate_sync(fts->txchan); + dma_release_channel(fts->txchan); + } + + if (fts->rxchan) { + dmaengine_terminate_sync(fts->rxchan); + dma_release_channel(fts->rxchan); + } +} + +static irqreturn_t phytium_spi_dma_transfer_handler(struct phytium_spi *fts) +{ + phytium_spi_check_status(fts, false); + + complete(&fts->dma_completion); + + return IRQ_HANDLED; +} + +static bool phytium_spi_can_dma(struct spi_controller *master, + struct spi_device *spi, struct spi_transfer *xfer) +{ + struct phytium_spi *fts = spi_controller_get_devdata(master); + + return xfer->len > fts->fifo_len; +} + +static enum dma_slave_buswidth phytium_spi_dma_convert_width(u8 n_bytes) +{ + if (n_bytes == 1) + return DMA_SLAVE_BUSWIDTH_1_BYTE; + else if (n_bytes == 2) + return DMA_SLAVE_BUSWIDTH_2_BYTES; + + return DMA_SLAVE_BUSWIDTH_UNDEFINED; +} + +static int phytium_spi_dma_wait(struct phytium_spi *fts, unsigned int len, + u32 speed) +{ + unsigned long long ms; + + ms = len * MSEC_PER_SEC * BITS_PER_BYTE; + do_div(ms, speed); + ms += ms + 200; + + if (ms > UINT_MAX) + ms = UINT_MAX; + + ms = wait_for_completion_timeout(&fts->dma_completion, + msecs_to_jiffies(ms)); + + if (ms == 0) { + dev_err(&fts->master->cur_msg->spi->dev, + "DMA transaction timed out\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static inline bool 
phytium_spi_dma_tx_busy(struct phytium_spi *fts) +{ + return !(phytium_readl(fts, SR) & SR_TF_EMPT); +} + +static int phytium_spi_dma_wait_tx_done(struct phytium_spi *fts, + struct spi_transfer *xfer) +{ + int retry = SPI_WAIT_RETRIES; + struct spi_delay delay; + u32 nents; + + nents = phytium_readl(fts, TXFLR); + delay.unit = SPI_DELAY_UNIT_SCK; + delay.value = nents * fts->n_bytes * BITS_PER_BYTE; + + while (phytium_spi_dma_tx_busy(fts) && retry--) + spi_delay_exec(&delay, xfer); + + if (retry < 0) { + dev_err(&fts->master->dev, "Tx hanged up\n"); + return -EIO; + } + + return 0; +} + +/* + * fts->dma_chan_busy is set before the dma transfer starts, callback for tx + * channel will clear a corresponding bit. + */ +static void phytium_spi_dma_tx_done(void *arg) +{ + struct phytium_spi *fts = arg; + + clear_bit(TX_BUSY, &fts->dma_chan_busy); + if (test_bit(RX_BUSY, &fts->dma_chan_busy)) + return; + + complete(&fts->dma_completion); +} + +static int phytium_spi_dma_config_tx(struct phytium_spi *fts) +{ + struct dma_slave_config txconf; + + memset(&txconf, 0, sizeof(txconf)); + txconf.direction = DMA_MEM_TO_DEV; + txconf.dst_addr = fts->dma_addr; + txconf.dst_maxburst = fts->txburst; + txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + txconf.dst_addr_width = phytium_spi_dma_convert_width(fts->n_bytes); + txconf.device_fc = false; + + return dmaengine_slave_config(fts->txchan, &txconf); +} + +static int phytium_spi_dma_submit_tx(struct phytium_spi *fts, struct scatterlist *sgl, + unsigned int nents) +{ + struct dma_async_tx_descriptor *txdesc; + dma_cookie_t cookie; + int ret; + + txdesc = dmaengine_prep_slave_sg(fts->txchan, sgl, nents, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!txdesc) + return -ENOMEM; + + txdesc->callback = phytium_spi_dma_tx_done; + txdesc->callback_param = fts; + + cookie = dmaengine_submit(txdesc); + ret = dma_submit_error(cookie); + if (ret) { + dmaengine_terminate_sync(fts->txchan); + return ret; + } + + set_bit(TX_BUSY, &fts->dma_chan_busy); + + return 0; +} + +static inline bool phytium_spi_dma_rx_busy(struct phytium_spi *fts) +{ + return !!(phytium_readl(fts, SR) & SR_RF_NOT_EMPT); +} + +static int phytium_spi_dma_wait_rx_done(struct phytium_spi *fts) +{ + int retry = SPI_WAIT_RETRIES; + struct spi_delay delay; + unsigned long ns, us; + u32 nents; + + /* + * It's unlikely that DMA engine is still doing the data fetching, but + * if it's let's give it some reasonable time. The timeout calculation + * is based on the synchronous APB/SSI reference clock rate, on a + * number of data entries left in the Rx FIFO, times a number of clock + * periods normally needed for a single APB read/write transaction + * without PREADY signal utilized (which is true for the phytium APB SSI + * controller). + */ + nents = phytium_readl(fts, RXFLR); + ns = 4U * NSEC_PER_SEC / fts->max_freq * nents; + if (ns <= NSEC_PER_USEC) { + delay.unit = SPI_DELAY_UNIT_NSECS; + delay.value = ns; + } else { + us = DIV_ROUND_UP(ns, NSEC_PER_USEC); + delay.unit = SPI_DELAY_UNIT_USECS; + delay.value = clamp_val(us, 0, USHRT_MAX); + } + + while (phytium_spi_dma_rx_busy(fts) && retry--) + spi_delay_exec(&delay, NULL); + + if (retry < 0) { + dev_err(&fts->master->dev, "Rx hanged up, nents = %d\n", nents); + return -EIO; + } + + return 0; +} + +/* + * fts->dma_chan_busy is set before the dma transfer starts, callback for rx + * channel will clear a corresponding bit. 
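+ *
+ * The completion fires only once both TX_BUSY and RX_BUSY are clear, so a
+ * full-duplex transfer waits for the slower of the two DMA channels.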
+ +/* + * fts->dma_chan_busy is set before the DMA transfer starts; the Rx channel + * callback clears the corresponding bit. + */ +static void phytium_spi_dma_rx_done(void *arg) +{ + struct phytium_spi *fts = arg; + + clear_bit(RX_BUSY, &fts->dma_chan_busy); + if (test_bit(TX_BUSY, &fts->dma_chan_busy)) + return; + + complete(&fts->dma_completion); +} + +static int phytium_spi_dma_config_rx(struct phytium_spi *fts) +{ + struct dma_slave_config rxconf; + + memset(&rxconf, 0, sizeof(rxconf)); + rxconf.direction = DMA_DEV_TO_MEM; + rxconf.src_addr = fts->dma_addr; + rxconf.src_maxburst = fts->rxburst; + rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + rxconf.src_addr_width = phytium_spi_dma_convert_width(fts->n_bytes); + rxconf.device_fc = false; + + return dmaengine_slave_config(fts->rxchan, &rxconf); +} + +static int phytium_spi_dma_submit_rx(struct phytium_spi *fts, struct scatterlist *sgl, + unsigned int nents) +{ + struct dma_async_tx_descriptor *rxdesc; + dma_cookie_t cookie; + int ret; + + rxdesc = dmaengine_prep_slave_sg(fts->rxchan, sgl, nents, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxdesc) + return -ENOMEM; + + rxdesc->callback = phytium_spi_dma_rx_done; + rxdesc->callback_param = fts; + + cookie = dmaengine_submit(rxdesc); + ret = dma_submit_error(cookie); + if (ret) { + dmaengine_terminate_sync(fts->rxchan); + return ret; + } + + set_bit(RX_BUSY, &fts->dma_chan_busy); + + return 0; +} + +static int phytium_spi_dma_setup(struct phytium_spi *fts, struct spi_transfer *xfer) +{ + u16 imr, dma_ctrl; + int ret; + + if (!xfer->tx_buf) + return -EINVAL; + + /* Set up the DMA channels */ + ret = phytium_spi_dma_config_tx(fts); + if (ret) + return ret; + + if (xfer->rx_buf) { + ret = phytium_spi_dma_config_rx(fts); + if (ret) + return ret; + } + + /* Enable the DMA handshaking interface */ + dma_ctrl = SPI_DMA_TDMAE; + if (xfer->rx_buf) + dma_ctrl |= SPI_DMA_RDMAE; + phytium_writel(fts, DMACR, dma_ctrl); + + /* Set the interrupt mask */ + imr = INT_TXOI; + if (xfer->rx_buf) + imr |= INT_RXUI | INT_RXOI; + + spi_umask_intr(fts, imr); + + reinit_completion(&fts->dma_completion); + + fts->transfer_handler = phytium_spi_dma_transfer_handler; + + return 0; +}
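A side note on the two done callbacks above: together they implement a last-one-out join, where whichever channel finishes second signals the transfer completion. A minimal userspace sketch of the same pattern (illustrative only; the driver itself uses clear_bit()/test_bit() on dma_chan_busy):

#include <stdatomic.h>
#include <stdio.h>

#define TX_BIT (1u << 0)
#define RX_BIT (1u << 1)

static atomic_uint busy = TX_BIT | RX_BIT;

static void channel_done(unsigned int my_bit, const char *name)
{
	/* clear my bit; the returned old value says whether I was last */
	unsigned int prev = atomic_fetch_and(&busy, ~my_bit);

	if (!(prev & ~my_bit))
		printf("%s finished last: complete the transfer\n", name);
}

int main(void)
{
	channel_done(TX_BIT, "tx");
	channel_done(RX_BIT, "rx"); /* -> rx finished last */
	return 0;
}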
+ +static int phytium_spi_dma_transfer_all(struct phytium_spi *fts, + struct spi_transfer *xfer) +{ + int ret; + + /* Submit the DMA Tx transfer */ + ret = phytium_spi_dma_submit_tx(fts, xfer->tx_sg.sgl, xfer->tx_sg.nents); + if (ret) + goto err_clear_dmac; + + /* Submit the DMA Rx transfer if required */ + if (xfer->rx_buf) { + ret = phytium_spi_dma_submit_rx(fts, xfer->rx_sg.sgl, + xfer->rx_sg.nents); + if (ret) + goto err_clear_dmac; + + /* Rx must be issued before Tx, since the Tx clock drives Rx data in */ + dma_async_issue_pending(fts->rxchan); + } + + dma_async_issue_pending(fts->txchan); + + ret = phytium_spi_dma_wait(fts, xfer->len, xfer->effective_speed_hz); + +err_clear_dmac: + phytium_writel(fts, DMACR, 0); + + return ret; +} + +static int phytium_spi_dma_transfer_one(struct phytium_spi *fts, + struct spi_transfer *xfer) +{ + struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp; + unsigned int tx_len = 0, rx_len = 0; + unsigned int base, len; + int ret = 0; + + sg_init_table(&tx_tmp, 1); + sg_init_table(&rx_tmp, 1); + + for (base = 0, len = 0; base < xfer->len; base += len) { + /* Fetch the next Tx DMA data chunk */ + if (!tx_len) { + tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg); + sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg); + tx_len = sg_dma_len(tx_sg); + } + + /* Fetch the next Rx DMA data chunk */ + if (!rx_len) { + rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg); + sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg); + rx_len = sg_dma_len(rx_sg); + } + + if ((base + DMA_MAX_BUF_SIZE) > xfer->len) + len = xfer->len - base; + else + len = DMA_MAX_BUF_SIZE; + + len = min3(len, tx_len, rx_len); + + sg_dma_len(&tx_tmp) = len; + sg_dma_len(&rx_tmp) = len; + + /* Submit the DMA Tx transfer */ + ret = phytium_spi_dma_submit_tx(fts, &tx_tmp, 1); + if (ret) + break; + + /* Submit the DMA Rx transfer */ + ret = phytium_spi_dma_submit_rx(fts, &rx_tmp, 1); + if (ret) + break; + + /* Rx must be issued before Tx, since the Tx clock drives Rx data in */ + dma_async_issue_pending(fts->rxchan); + + dma_async_issue_pending(fts->txchan); + + /* + * Here we only need to wait for the DMA transfer to be + * finished: the SPI controller is kept enabled during the + * procedure this loop implements, so there is no risk of + * losing data left in the Tx/Rx FIFOs. + */ + ret = phytium_spi_dma_wait(fts, len, xfer->effective_speed_hz); + if (ret) + break; + + reinit_completion(&fts->dma_completion); + + sg_dma_address(&tx_tmp) += len; + sg_dma_address(&rx_tmp) += len; + tx_len -= len; + rx_len -= len; + } + + phytium_writel(fts, DMACR, 0); + + return ret; +} + +static int phytium_spi_dma_transfer(struct phytium_spi *fts, struct spi_transfer *xfer) +{ + int ret; + + /* + * A large transfer can overflow the SPI Rx FIFO before the Rx DMA + * channel drains it, so split anything bigger than DMA_MAX_BUF_SIZE + * (4096 bytes) into bounded chunks. + */ + if (xfer->len <= DMA_MAX_BUF_SIZE) + ret = phytium_spi_dma_transfer_all(fts, xfer); + else + ret = phytium_spi_dma_transfer_one(fts, xfer); + if (ret) + return ret; + + if (fts->master->cur_msg->status == -EINPROGRESS) { + ret = phytium_spi_dma_wait_tx_done(fts, xfer); + if (ret) + return ret; + } + + if (xfer->rx_buf && fts->master->cur_msg->status == -EINPROGRESS) + ret = phytium_spi_dma_wait_rx_done(fts); + + return ret; +} + +static void phytium_spi_dma_stop(struct phytium_spi *fts) +{ + if (test_bit(TX_BUSY, &fts->dma_chan_busy)) { + dmaengine_terminate_sync(fts->txchan); + clear_bit(TX_BUSY, &fts->dma_chan_busy); + } + if (test_bit(RX_BUSY, &fts->dma_chan_busy)) { + dmaengine_terminate_sync(fts->rxchan); + clear_bit(RX_BUSY, &fts->dma_chan_busy); + } +} + +static const struct phytium_spi_dma_ops phytium_spi_dma_generic_ops = { + .dma_init = phytium_spi_dma_init, + .dma_exit = phytium_spi_dma_exit, + .dma_setup = phytium_spi_dma_setup, + .can_dma = phytium_spi_can_dma, + .dma_transfer = phytium_spi_dma_transfer, + .dma_stop = phytium_spi_dma_stop, +}; + +void phytium_spi_dmaops_set(struct phytium_spi *fts) +{ + fts->dma_ops = &phytium_spi_dma_generic_ops; +} +EXPORT_SYMBOL_GPL(phytium_spi_dmaops_set);
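To make the split policy concrete, a standalone sketch (illustrative only, not driver code) of how a 10000-byte transfer is carved into DMA_MAX_BUF_SIZE chunks, each submitted and waited on in turn:

#include <stdio.h>

#define DMA_MAX_BUF_SIZE 4096 /* chunk bound assumed from the driver */

int main(void)
{
	unsigned int len = 10000, base, chunk;

	for (base = 0; base < len; base += chunk) {
		chunk = (len - base > DMA_MAX_BUF_SIZE) ?
			DMA_MAX_BUF_SIZE : (len - base);
		printf("chunk @%5u: %u bytes\n", base, chunk);
	}
	/* -> 4096 + 4096 + 1808 bytes */
	return 0;
}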
diff --git a/drivers/spi/spi-phytium-plat.c b/drivers/spi/spi-phytium-plat.c index d2d193053f44eba64a24939aa9f167193e05213d..51805f1b0c0f7f161af91c2d67ba6504998d7244 100644 --- a/drivers/spi/spi-phytium-plat.c +++ b/drivers/spi/spi-phytium-plat.c @@ -4,7 +4,6 @@ * * Copyright (c) 2019-2023 Phytium Technology Co., Ltd. */ - #include #include #include @@ -48,6 +47,7 @@ static int phytium_spi_probe(struct platform_device *pdev) return -EINVAL; } + fts->paddr = mem->start; fts->regs = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(fts->regs)) { dev_err(&pdev->dev, "SPI region map failed\n"); @@ -78,9 +78,7 @@ static int phytium_spi_probe(struct platform_device *pdev) device_property_read_u32(&pdev->dev, "reg-io-width", &fts->reg_io_width); num_cs = 4; - device_property_read_u32(&pdev->dev, "num-cs", &num_cs); - fts->num_cs = num_cs; if (pdev->dev.of_node) { @@ -107,7 +105,7 @@ int *cs; struct gpio_desc *gpiod; - n = gpiod_count(&pdev->dev, "cs"); + n = gpiod_count(&pdev->dev, "cs"); cs = devm_kcalloc(&pdev->dev, n, sizeof(int), GFP_KERNEL); fts->cs = cs; @@ -129,6 +127,14 @@ device_property_read_u32(&pdev->dev, "global-cs", &global_cs); fts->global_cs = global_cs; + /* Use DMA transfers if DMA channels are described for this device */ + if ((device_property_read_string_array(&pdev->dev, "dma-names", + NULL, 0) > 0) && + device_property_present(&pdev->dev, "dmas")) { + fts->dma_en = true; + phytium_spi_dmaops_set(fts); + } + ret = phytium_spi_add_host(&pdev->dev, fts); if (ret) goto out;
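One probe-ordering note on the DMA opt-in above: dma_request_chan() in phytium_spi_dma_init() can return ERR_PTR(-EPROBE_DEFER) when the DDMA controller has not been bound yet, and the -ENODEV fallback silently turns that into PIO mode for good. A deferral-aware variant would look roughly like this sketch (not part of the patch; request_optional_dma() is a hypothetical helper):

#include <linux/dmaengine.h>

/* Sketch: request an optional DMA channel while still honouring deferral */
static int request_optional_dma(struct device *dev, struct dma_chan **chan)
{
	*chan = dma_request_chan(dev, "rx");
	if (IS_ERR(*chan)) {
		int ret = PTR_ERR(*chan);

		*chan = NULL;
		if (ret == -EPROBE_DEFER)
			return ret;	/* retry probe once the DMA driver is bound */
		return -ENODEV;		/* genuinely absent: fall back to PIO */
	}
	return 0;
}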
diff --git a/drivers/spi/spi-phytium.c b/drivers/spi/spi-phytium.c index 468b1558c6eb89805b5c6a23be2282d8cb836bff..a6f849c5cf1ae1e8cf5a908fb3b6e46c20f4b854 100644 --- a/drivers/spi/spi-phytium.c +++ b/drivers/spi/spi-phytium.c @@ -4,7 +4,6 @@ * * Copyright (c) 2019-2023 Phytium Technology Co., Ltd. */ - #include #include #include @@ -24,102 +23,6 @@ #include #include "spi-phytium.h" -static inline u32 phytium_readl(struct phytium_spi *fts, u32 offset) -{ - return __raw_readl(fts->regs + offset); -} - -static inline u16 phytium_readw(struct phytium_spi *fts, u32 offset) -{ - return __raw_readw(fts->regs + offset); -} - -static inline void phytium_writel(struct phytium_spi *fts, u32 offset, u32 val) -{ - __raw_writel(val, fts->regs + offset); -} - -static inline void phytium_writew(struct phytium_spi *fts, u32 offset, u16 val) -{ - __raw_writew(val, fts->regs + offset); -} - -static inline u32 phytium_read_io_reg(struct phytium_spi *fts, u32 offset) -{ - switch (fts->reg_io_width) { - case 2: - return phytium_readw(fts, offset); - case 4: - default: - return phytium_readl(fts, offset); - } -} - -static inline void phytium_write_io_reg(struct phytium_spi *fts, u32 offset, u32 val) -{ - switch (fts->reg_io_width) { - case 2: - phytium_writew(fts, offset, val); - break; - case 4: - default: - phytium_writel(fts, offset, val); - break; - } -} - -static inline void spi_enable_chip(struct phytium_spi *fts, int enable) -{ - phytium_writel(fts, SSIENR, (enable ? 1 : 0)); -} - -static inline void spi_set_clk(struct phytium_spi *fts, u16 div) -{ - phytium_writel(fts, BAUDR, div); -} - -static inline void spi_mask_intr(struct phytium_spi *fts, u32 mask) -{ - u32 new_mask; - - new_mask = phytium_readl(fts, IMR) & ~mask; - phytium_writel(fts, IMR, new_mask); -} - -static inline void spi_umask_intr(struct phytium_spi *fts, u32 mask) -{ - u32 new_mask; - - new_mask = phytium_readl(fts, IMR) | mask; - phytium_writel(fts, IMR, new_mask); -} - -static inline void spi_global_cs(struct phytium_spi *fts) -{ - u32 global_cs_en, mask, setmask; - - mask = GENMASK(fts->num_cs-1, 0) << fts->num_cs; - setmask = ~GENMASK(fts->num_cs-1, 0); - global_cs_en = (phytium_readl(fts, GCSR) | mask) & setmask; - - phytium_writel(fts, GCSR, global_cs_en); -} - -static inline void spi_reset_chip(struct phytium_spi *fts) -{ - spi_enable_chip(fts, 0); - if (fts->global_cs) - spi_global_cs(fts); - spi_mask_intr(fts, 0xff); - spi_enable_chip(fts, 1); -} - -static inline void spi_shutdown_chip(struct phytium_spi *fts) -{ - spi_enable_chip(fts, 0); - spi_set_clk(fts, 0); -} - struct phytium_spi_chip { u8 poll_mode; u8 type; @@ -215,6 +118,42 @@ static void phytium_reader(struct phytium_spi *fts) } } +int phytium_spi_check_status(struct phytium_spi *fts, bool raw) +{ + u32 irq_status; + int ret = 0; + + if (raw) + irq_status = phytium_readl(fts, RISR); + else + irq_status = phytium_readl(fts, ISR); + + if (irq_status & INT_RXOI) { + dev_err(&fts->master->dev, "RX FIFO overflow detected\n"); + ret = -EIO; + } + + if (irq_status & INT_RXUI) { + dev_err(&fts->master->dev, "RX FIFO underflow detected\n"); + ret = -EIO; + } + + if (irq_status & INT_TXOI) { + dev_err(&fts->master->dev, "TX FIFO overflow detected\n"); + ret = -EIO; + } + + /* Generically handle the erroneous situation */ + if (ret) { + spi_reset_chip(fts); + if (fts->master->cur_msg) + fts->master->cur_msg->status = ret; + } + + return ret; +} +EXPORT_SYMBOL_GPL(phytium_spi_check_status); + static void int_error_stop(struct phytium_spi *fts, const char *msg) { spi_reset_chip(fts); @@ -289,7 +228,9 @@ static int phytium_spi_transfer_one(struct spi_master *master, u16 txlevel = 0; u16 clk_div; u32 cr0; + int ret = 0; + fts->dma_mapped = 0; fts->tx = (void *)transfer->tx_buf; fts->tx_end = fts->tx + transfer->len; fts->rx = transfer->rx_buf; @@ -298,12 +239,15 @@ static int phytium_spi_transfer_one(struct spi_master *master, spi_enable_chip(fts, 0); - if (transfer->speed_hz != chip->speed_hz) { - clk_div = (fts->max_freq / transfer->speed_hz + 1) & 0xfffe; - - chip->speed_hz = transfer->speed_hz; - chip->clk_div = clk_div; + if (transfer->speed_hz != fts->current_freq) { + if (transfer->speed_hz != chip->speed_hz) { + clk_div = (fts->max_freq / transfer->speed_hz + 1) & + 0xfffe; + chip->speed_hz = transfer->speed_hz; + chip->clk_div = clk_div; + } + fts->current_freq = transfer->speed_hz; spi_set_clk(fts, chip->clk_div); } @@ -332,11 +276,23 @@ static int phytium_spi_transfer_one(struct spi_master *master, cr0 |= (chip->tmode << TMOD_OFFSET); } - phytium_writel(fts, CTRL0, cr0); + phytium_writel(fts, CTRLR0, cr0); + + /* Check whether the current transfer is a DMA transaction */ + if (master->can_dma && master->can_dma(master, spi, transfer)) + fts->dma_mapped = master->cur_msg_mapped; spi_mask_intr(fts, 0xff); - if (!chip->poll_mode) { + /* DMA setup */ + if (fts->dma_mapped) { + ret = fts->dma_ops->dma_setup(fts, transfer); + if (ret) + return ret; + } + + /* Otherwise set up interrupt-driven transfer mode */ + if (!chip->poll_mode && !fts->dma_mapped)
{ txlevel = min_t(u16, fts->fifo_len / 2, fts->len / fts->n_bytes); phytium_writel(fts, TXFLTR, txlevel); @@ -349,6 +305,9 @@ static int phytium_spi_transfer_one(struct spi_master *master, spi_enable_chip(fts, 1); + if (fts->dma_mapped) + return fts->dma_ops->dma_transfer(fts, transfer); + if (chip->poll_mode) return poll_transfer(fts); @@ -360,6 +319,9 @@ static void phytium_spi_handle_err(struct spi_master *master, { struct phytium_spi *fts = spi_master_get_devdata(master); + if (fts->dma_mapped) + fts->dma_ops->dma_stop(fts); + spi_reset_chip(fts); } @@ -397,7 +359,7 @@ static int phytium_spi_setup(struct spi_device *spi) cr0 = (spi->bits_per_word - 1) | (chip->type << FRF_OFFSET) | (spi->mode << MODE_OFFSET) | (chip->tmode << TMOD_OFFSET); - phytium_writel(fts, CTRL0, cr0); + phytium_writel(fts, CTRLR0, cr0); if (gpio_is_valid(spi->cs_gpio)) { ret = gpio_direction_output(spi->cs_gpio, @@ -450,6 +412,7 @@ int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts) return -ENOMEM; fts->master = master; + fts->dma_addr = (dma_addr_t)(fts->paddr + DR); snprintf(fts->name, sizeof(fts->name), "phytium_spi%d", fts->bus_num); ret = request_irq(fts->irq, phytium_spi_irq, IRQF_SHARED, fts->name, master); @@ -475,6 +438,17 @@ int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts) spi_hw_init(dev, fts); + + if (fts->dma_ops && fts->dma_ops->dma_init) { + ret = fts->dma_ops->dma_init(dev, fts); + if (ret) { + dev_warn(dev, "DMA init failed\n"); + } else { + master->can_dma = fts->dma_ops->can_dma; + master->flags |= SPI_CONTROLLER_MUST_TX; + } + } + spi_master_set_devdata(master, fts); ret = devm_spi_register_master(dev, master); if (ret) { @@ -485,6 +459,8 @@ int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts) return 0; err_exit: + if (fts->dma_ops && fts->dma_ops->dma_exit) + fts->dma_ops->dma_exit(fts); spi_enable_chip(fts, 0); free_irq(fts->irq, master); err_free_master: @@ -495,6 +471,8 @@ EXPORT_SYMBOL_GPL(phytium_spi_add_host); void phytium_spi_remove_host(struct phytium_spi *fts) { + if (fts->dma_ops && fts->dma_ops->dma_exit) + fts->dma_ops->dma_exit(fts); spi_shutdown_chip(fts); free_irq(fts->irq, fts->master); diff --git a/drivers/spi/spi-phytium.h b/drivers/spi/spi-phytium.h index 9c8a46f3249668e1a0ee82520bdaac5ecb7789e0..003b08f8c5068c1deb00ba56e858881702945f0b 100644 --- a/drivers/spi/spi-phytium.h +++ b/drivers/spi/spi-phytium.h @@ -6,17 +6,22 @@ #include #include -#define CTRL0 0x00 +#define CTRLR0 0x00 #define SSIENR 0x08 -#define SER 0x10 +#define SER 0x10 #define BAUDR 0x14 #define TXFLTR 0x18 #define TXFLR 0x20 #define RXFLR 0x24 -#define IMR 0x2c -#define ISR 0x30 -#define ICR 0x48 -#define DR 0x60 +#define SR 0x28 +#define IMR 0x2c +#define ISR 0x30 +#define RISR 0x34 +#define ICR 0x48 +#define DMACR 0x4C +#define DMATDLR 0x50 +#define DMARDLR 0x54 +#define DR 0x60 #define GCSR 0x100 #define FRF_OFFSET 4 @@ -33,12 +38,41 @@ #define INT_RXUI (1 << 2) #define INT_RXOI (1 << 3) +/* Bit fields in SR, 7 bits */ +#define SR_MASK 0x7f /* cover 7 bits */ +#define SR_BUSY (1 << 0) +#define SR_TF_NOT_FULL (1 << 1) +#define SR_TF_EMPT (1 << 2) +#define SR_RF_NOT_EMPT (1 << 3) +#define SR_RF_FULL (1 << 4) +#define SR_TX_ERR (1 << 5) +#define SR_DCOL (1 << 6) + +/* Bit fields in DMACR */ +#define SPI_DMA_RDMAE (1 << 0) +#define SPI_DMA_TDMAE (1 << 1) + +#define SPI_WAIT_RETRIES 5 + +struct phytium_spi; + +struct phytium_spi_dma_ops { + int (*dma_init)(struct device *dev, struct phytium_spi *fts); + void (*dma_exit)(struct phytium_spi *fts); + int 
(*dma_setup)(struct phytium_spi *fts, struct spi_transfer *xfer); + bool (*can_dma)(struct spi_controller *master, struct spi_device *spi, + struct spi_transfer *xfer); + int (*dma_transfer)(struct phytium_spi *fts, struct spi_transfer *xfer); + void (*dma_stop)(struct phytium_spi *fts); +}; + struct phytium_spi { struct spi_master *master; char name[16]; void __iomem *regs; bool global_cs; + bool dma_en; unsigned long paddr; int irq; u32 fifo_len; @@ -54,14 +88,126 @@ struct phytium_spi { void *tx_end; void *rx; void *rx_end; - u8 n_bytes; - struct clk *clk; + u8 n_bytes; + int dma_mapped; + struct clk *clk; irqreturn_t (*transfer_handler)(struct phytium_spi *fts); + u32 current_freq; /* transfer frequency in Hz */ + + /* DMA info */ + struct dma_chan *txchan; + u32 txburst; + struct dma_chan *rxchan; + u32 rxburst; + u32 dma_sg_burst; + unsigned long dma_chan_busy; + dma_addr_t dma_addr; /* physical address of the DR data register */ + const struct phytium_spi_dma_ops *dma_ops; + struct completion dma_completion; }; +static inline u32 phytium_readl(struct phytium_spi *fts, u32 offset) +{ + return __raw_readl(fts->regs + offset); +} + +static inline u16 phytium_readw(struct phytium_spi *fts, u32 offset) +{ + return __raw_readw(fts->regs + offset); +} + +static inline void phytium_writel(struct phytium_spi *fts, u32 offset, u32 val) +{ + __raw_writel(val, fts->regs + offset); +} + +static inline void phytium_writew(struct phytium_spi *fts, u32 offset, u16 val) +{ + __raw_writew(val, fts->regs + offset); +} + +static inline u32 phytium_read_io_reg(struct phytium_spi *fts, u32 offset) +{ + switch (fts->reg_io_width) { + case 2: + return phytium_readw(fts, offset); + case 4: + default: + return phytium_readl(fts, offset); + } +} + +static inline void phytium_write_io_reg(struct phytium_spi *fts, u32 offset, u32 val) +{ + switch (fts->reg_io_width) { + case 2: + phytium_writew(fts, offset, val); + break; + case 4: + default: + phytium_writel(fts, offset, val); + break; + } +} + +static inline void spi_enable_chip(struct phytium_spi *fts, int enable) +{ + phytium_writel(fts, SSIENR, (enable ?
1 : 0)); +} + +static inline void spi_set_clk(struct phytium_spi *fts, u16 div) +{ + phytium_writel(fts, BAUDR, div); +} + +static inline void spi_mask_intr(struct phytium_spi *fts, u32 mask) +{ + u32 new_mask; + + new_mask = phytium_readl(fts, IMR) & ~mask; + phytium_writel(fts, IMR, new_mask); +} + +static inline void spi_umask_intr(struct phytium_spi *fts, u32 mask) +{ + u32 new_mask; + + new_mask = phytium_readl(fts, IMR) | mask; + phytium_writel(fts, IMR, new_mask); +} + +static inline void spi_global_cs(struct phytium_spi *fts) +{ + u32 global_cs_en, mask, setmask; + + mask = GENMASK(fts->num_cs-1, 0) << fts->num_cs; + setmask = ~GENMASK(fts->num_cs-1, 0); + global_cs_en = (phytium_readl(fts, GCSR) | mask) & setmask; + + phytium_writel(fts, GCSR, global_cs_en); +} + +static inline void spi_reset_chip(struct phytium_spi *fts) +{ + spi_enable_chip(fts, 0); + if (fts->global_cs) + spi_global_cs(fts); + spi_mask_intr(fts, 0xff); + spi_enable_chip(fts, 1); +} + +static inline void spi_shutdown_chip(struct phytium_spi *fts) +{ + spi_enable_chip(fts, 0); + spi_set_clk(fts, 0); + fts->current_freq = 0; +} + extern int phytium_spi_add_host(struct device *dev, struct phytium_spi *fts); extern void phytium_spi_remove_host(struct phytium_spi *fts); extern int phytium_spi_suspend_host(struct phytium_spi *fts); extern int phytium_spi_resume_host(struct phytium_spi *fts); +extern void phytium_spi_dmaops_set(struct phytium_spi *fts); +extern int phytium_spi_check_status(struct phytium_spi *fts, bool raw); #endif /* PHYTIUM_SPI_HEADER_H */
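As a closing note, the GCSR arithmetic in spi_global_cs() is easier to see with numbers. A standalone sketch (not driver code, and the register semantics are an assumption from the code shape) for num_cs = 4: the upper num_cs bits, which appear to be the per-CS override enables, are set, while the lower num_cs bits, which appear to hold the CS values, are cleared.

#include <stdio.h>

#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	unsigned int num_cs = 4, gcsr = 0;
	unsigned int mask = GENMASK(num_cs - 1, 0) << num_cs; /* 0xf0 */
	unsigned int setmask = ~GENMASK(num_cs - 1, 0);       /* 0xfffffff0 */

	gcsr = (gcsr | mask) & setmask;
	printf("GCSR = 0x%x\n", gcsr); /* -> 0xf0 */
	return 0;
}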