/*
* MUSB OTG driver peripheral support
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include "musb_core.h"
/* MUSB PERIPHERAL status 3-mar-2006:
*
* - EP0 seems solid. It passes both USBCV and usbtest control cases.
* Minor glitches:
*
* + remote wakeup to Linux hosts works, but saw USBCV failures
* in one test run (operator error?)
* + endpoint halt tests -- in both usbtest and usbcv -- seem
* to break when dma is enabled ... is something wrongly
* clearing SENDSTALL?
*
* - Mass storage behaved ok when last tested. Network traffic patterns
* (with lots of short transfers etc) need retesting; they turn up the
* worst cases of the DMA, since short packets are typical but are not
* required.
*
* - TX/IN
* + both pio and dma behave well with network and g_zero tests
* + no cppi throughput issues other than no-hw-queueing
* + failed with FLAT_REG (DaVinci)
* + seems to behave with double buffering, PIO -and- CPPI
* + with gadgetfs + AIO, requests got lost?
*
* - RX/OUT
* + both pio and dma behave well with network and g_zero tests
* + dma is slow in typical case (short_not_ok is clear)
* + double buffering ok with PIO
* + double buffering *FAILS* with CPPI, wrong data bytes sometimes
* + request lossage observed with gadgetfs
*
* - ISO not tested ... might work, but only weakly isochronous
*
* - Gadget driver disabling of softconnect during bind() is ignored; so
* drivers can't hold off host requests until userspace is ready.
* (Workaround: they can turn it off later.)
*
* - PORTABILITY (assumes PIO works):
* + DaVinci, basically works with cppi dma
* + OMAP 2430, ditto with mentor dma
* + TUSB 6010, platform-specific dma in the works
*/
/* ----------------------------------------------------------------------- */
#define is_buffer_mapped(req) (is_dma_capable() && \
(req->map_state != UN_MAPPED))
/* Maps the buffer to dma */
static inline void map_dma_buffer(struct musb_request *request,
struct musb *musb, struct musb_ep *musb_ep)
{
int compatible = true;
struct dma_controller *dma = musb->dma_controller;
request->map_state = UN_MAPPED;
if (!is_dma_capable() || !musb_ep->dma)
return;
/* Check if DMA engine can handle this request.
* DMA code must reject the USB request explicitly.
* Default behaviour is to map the request.
*/
if (dma->is_compatible)
compatible = dma->is_compatible(musb_ep->dma,
musb_ep->packet_sz, request->request.buf,
request->request.length);
if (!compatible)
return;
if (request->request.dma == DMA_ADDR_INVALID) {
request->request.dma = dma_map_single(
musb->controller,
request->request.buf,
request->request.length,
request->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
request->map_state = MUSB_MAPPED;
} else {
dma_sync_single_for_device(musb->controller,
request->request.dma,
request->request.length,
request->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
request->map_state = PRE_MAPPED;
}
}
/* Unmap the buffer from dma and maps it back to cpu */
static inline void unmap_dma_buffer(struct musb_request *request,
struct musb *musb)
{
if (!is_buffer_mapped(request))
return;
if (request->request.dma == DMA_ADDR_INVALID) {
dev_vdbg(musb->controller,
"not unmapping a never mapped buffer\n");
if (request->map_state == MUSB_MAPPED) {
dma_unmap_single(musb->controller,
request->request.dma,
request->request.length,
request->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
request->request.dma = DMA_ADDR_INVALID;
} else { /* PRE_MAPPED */
dma_sync_single_for_cpu(musb->controller,
request->request.dma,
request->request.length,
request->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
}
request->map_state = UN_MAPPED;
}
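/*
 * Illustrative sketch, not part of this driver: the two map states above
 * correspond to how a gadget function driver hands over its buffer.  The
 * names below (out_ep, buf, len, my_complete) are hypothetical.
 *
 *	struct usb_request *r = usb_ep_alloc_request(out_ep, GFP_KERNEL);
 *
 *	r->buf = buf;
 *	r->length = len;
 *	r->complete = my_complete;
 *	r->dma = DMA_ADDR_INVALID;	// let map_dma_buffer() map it (MUSB_MAPPED)
 *	usb_ep_queue(out_ep, r, GFP_ATOMIC);
 *
 * A driver that already owns a DMA mapping may instead fill in r->dma
 * itself; map_dma_buffer() then only syncs the buffer (PRE_MAPPED) and
 * unmap_dma_buffer() leaves the mapping alone.
 */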
/*
* Immediately complete a request.
*
* @param request the request to complete
* @param status the status to complete the request with
* Context: controller locked, IRQs blocked.
*/
void musb_g_giveback(
struct musb_ep *ep,
struct usb_request *request,
int status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
struct musb_request *req;
struct musb *musb;
int busy = ep->busy;
req = to_musb_request(request);
list_del(&req->list);
if (req->request.status == -EINPROGRESS)
req->request.status = status;
musb = req->musb;
ep->busy = 1;
spin_unlock(&musb->lock);
unmap_dma_buffer(req, musb);
dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
ep->end_point.name, request,
req->request.actual, req->request.length);
else
dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
ep->end_point.name, request,
req->request.actual, req->request.length,
request->status);
req->request.complete(&req->ep->end_point, &req->request);
spin_lock(&musb->lock);
ep->busy = busy;
}
/* ----------------------------------------------------------------------- */
/*
* Abort requests queued to an endpoint using the status. Synchronous.
* caller locked controller and blocked irqs, and selected this ep.
*/
static void nuke(struct musb_ep *ep, const int status)
{
struct musb *musb = ep->musb;
struct musb_request *req = NULL;
void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
ep->busy = 1;
if (is_dma_capable() && ep->dma) {
struct dma_controller *c = ep->musb->dma_controller;
int value;
if (ep->is_in) {
/*
* The programming guide says that we must not clear
* the DMAMODE bit before DMAENAB, so we only
* clear it in the second write...
*/
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
musb_writew(epio, MUSB_TXCSR,
0 | MUSB_TXCSR_FLUSHFIFO);
} else {
musb_writew(epio, MUSB_RXCSR,
0 | MUSB_RXCSR_FLUSHFIFO);
musb_writew(epio, MUSB_RXCSR,
0 | MUSB_RXCSR_FLUSHFIFO);
}
value = c->channel_abort(ep->dma);
dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
ep->name, value);
c->channel_release(ep->dma);
ep->dma = NULL;
}
while (!list_empty(&ep->req_list)) {
req = list_first_entry(&ep->req_list, struct musb_request, list);
musb_g_giveback(ep, &req->request, status);
}
}
/* ----------------------------------------------------------------------- */
/* Data transfers - pure PIO, pure DMA, or mixed mode */
/*
* This assumes the separate CPPI engine is responding to DMA requests
* from the usb core ... sequenced a bit differently from mentor dma.
*/
static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
if (can_bulk_split(musb, ep->type))
return ep->hw_ep->max_packet_sz_tx;
else
return ep->packet_sz;
}
#ifdef CONFIG_USB_INVENTRA_DMA
/* Peripheral tx (IN) using Mentor DMA works as follows:
Only mode 0 is used for transfers <= wPktSize,
mode 1 is used for larger transfers,
One of the following happens:
- Host sends IN token which causes an endpoint interrupt
-> TxAvail
-> if DMA is currently busy, exit.
-> if queue is non-empty, txstate().
- Request is queued by the gadget driver.
-> if queue was previously empty, txstate()
txstate()
-> start
/\ -> setup DMA
| (data is transferred to the FIFO, then sent out when
| IN token(s) are recd from Host.
| -> DMA interrupt on completion
| calls TxAvail.
| -> stop DMA, ~DMAENAB,
| -> set TxPktRdy for last short pkt or zlp
| -> Complete Request
| -> Continue next request (call txstate)
|___________________________________|
* Non-Mentor DMA engines can of course work differently, such as by
* upleveling from irq-per-packet to irq-per-buffer.
*/
#endif
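/*
 * Illustrative sketch, not part of this file: the flow above is kicked
 * either by the TxAvail interrupt or by the gadget driver queueing an IN
 * request while the endpoint queue is empty, in which case the driver's
 * ep_queue method calls txstate() directly.  Names (in_ep, req) are
 * hypothetical.
 *
 *	req->zero = 1;			// ask for a terminating ZLP if needed
 *	if (usb_ep_queue(in_ep, req, GFP_ATOMIC) < 0)
 *		;			// nothing queued, txstate() not reached
 */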
/*
* An endpoint is transmitting data. This can be called either from
* the IRQ routine or from ep.queue() to kickstart a request on an
* endpoint.
*
* Context: controller locked, IRQs blocked, endpoint selected
*/
static void txstate(struct musb *musb, struct musb_request *req)
{
u8 epnum = req->epnum;
struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
struct usb_request *request;
u16 fifo_count = 0, csr;
int use_dma = 0;
musb_ep = req->ep;
/* we shouldn't get here while DMA is active ... but we do ... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
dev_dbg(musb->controller, "dma pending...\n");
return;
}
/* read TXCSR before */
csr = musb_readw(epio, MUSB_TXCSR);
request = &req->request;
fifo_count = min(max_ep_writesize(musb, musb_ep),
(int)(request->length - request->actual));
if (csr & MUSB_TXCSR_TXPKTRDY) {
dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n",
musb_ep->end_point.name, csr);
return;
}
if (csr & MUSB_TXCSR_P_SENDSTALL) {
dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
musb_ep->end_point.name, csr);
return;
}
dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
epnum, musb_ep->packet_sz, fifo_count,
csr);
#ifndef CONFIG_MUSB_PIO_ONLY
if (is_buffer_mapped(req)) {
struct dma_controller *c = musb->dma_controller;
size_t request_size;
/* setup DMA, then program endpoint CSR */
request_size = min_t(size_t, request->length - request->actual,
musb_ep->dma->max_len);
use_dma = (request->dma != DMA_ADDR_INVALID);
/* MUSB_TXCSR_P_ISO is still set correctly */
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
{
if (request_size < musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;
else
musb_ep->dma->desired_mode = 1;
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
musb_ep->dma->desired_mode,
request->dma + request->actual, request_size);
if (use_dma) {
if (musb_ep->dma->desired_mode == 0) {
/*
* We must not clear the DMAMODE bit
* before the DMAENAB bit -- and the
* latter doesn't always get cleared
* before we get here...
*/
csr &= ~(MUSB_TXCSR_AUTOSET
| MUSB_TXCSR_DMAENAB);
musb_writew(epio, MUSB_TXCSR, csr
| MUSB_TXCSR_P_WZC_BITS);
csr &= ~MUSB_TXCSR_DMAMODE;
csr |= (MUSB_TXCSR_DMAENAB |
MUSB_TXCSR_MODE);
/* against programming guide */
} else {
csr |= (MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_DMAMODE
| MUSB_TXCSR_MODE);
if (!musb_ep->hb_mult)
csr |= MUSB_TXCSR_AUTOSET;
}
musb_writew(epio, MUSB_TXCSR, csr);
}
}
#elif defined(CONFIG_USB_TI_CPPI_DMA)
/* program endpoint CSR first, then setup DMA */
csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
MUSB_TXCSR_MODE;
musb_writew(epio, MUSB_TXCSR,
(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
| csr);
/* ensure writebuffer is empty */
csr = musb_readw(epio, MUSB_TXCSR);
/* NOTE host side sets DMAENAB later than this; both are
* OK since the transfer dma glue (between CPPI and Mentor
* fifos) just tells CPPI it could start. Data only moves
* to the USB TX fifo when both fifos are ready.
*/
/* "mode" is irrelevant here; handle terminating ZLPs like
* PIO does, since the hardware RNDIS mode seems unreliable
* except for the last-packet-is-already-short case.
*/
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
0,
request->dma + request->actual,
request_size);
if (!use_dma) {
c->channel_release(musb_ep->dma);
musb_ep->dma = NULL;
csr &= ~MUSB_TXCSR_DMAENAB;
musb_writew(epio, MUSB_TXCSR, csr);
/* invariant: request->buf is non-null */
}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
request->zero,
request->dma + request->actual,
request_size);
#endif
}
#endif
if (!use_dma) {
/*
* Unmap the dma buffer back to cpu if dma channel
* programming fails
*/
unmap_dma_buffer(req, musb);
musb_write_fifo(musb_ep->hw_ep, fifo_count,
(u8 *) (request->buf + request->actual));
request->actual += fifo_count;
csr |= MUSB_TXCSR_TXPKTRDY;
csr &= ~MUSB_TXCSR_P_UNDERRUN;
musb_writew(epio, MUSB_TXCSR, csr);
}
/* host may already have the data when this message shows... */
dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
musb_ep->end_point.name, use_dma ? "dma" : "pio",
request->actual, request->length,
musb_readw(epio, MUSB_TXCSR),
fifo_count,
musb_readw(epio, MUSB_TXMAXP));
}
/*
* FIFO state update (e.g. data ready).
* Called from IRQ, with controller locked.
*/
void musb_g_tx(struct musb *musb, u8 epnum)
{
u16 csr;
struct musb_request *req;
struct usb_request *request;
u8 __iomem *mbase = musb->mregs;
struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
void __iomem *epio = musb->endpoints[epnum].regs;
struct dma_channel *dma;
musb_ep_select(mbase, epnum);
req = next_request(musb_ep);
request = &req->request;
dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
dma = is_dma_capable() ? musb_ep->dma : NULL;
/*
* REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
* probably rates reporting as a host error.
*/
if (csr & MUSB_TXCSR_P_SENTSTALL) {
csr |= MUSB_TXCSR_P_WZC_BITS;
csr &= ~MUSB_TXCSR_P_SENTSTALL;
musb_writew(epio, MUSB_TXCSR, csr);
return;
}
if (csr & MUSB_TXCSR_P_UNDERRUN) {
/* We NAKed, no big deal... little reason to care. */
csr |= MUSB_TXCSR_P_WZC_BITS;
csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
musb_writew(epio, MUSB_TXCSR, csr);
dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
epnum, request);
}
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
/*
* SHOULD NOT HAPPEN... has with CPPI though, after
* changing SENDSTALL (and other cases); harmless?
*/
dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
return;
}
if (request) {
u8 is_dma = 0;
if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
is_dma = 1;
csr |= MUSB_TXCSR_P_WZC_BITS;
csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
musb_writew(epio, MUSB_TXCSR, csr);
/* Ensure writebuffer is empty. */
csr = musb_readw(epio, MUSB_TXCSR);
request->actual += musb_ep->dma->actual_len;
dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
epnum, csr, musb_ep->dma->actual_len, request);
}
/*
* First, maybe a terminating short packet. Some DMA
* engines might handle this by themselves.
*/
if ((request->zero && request->length
&& (request->length % musb_ep->packet_sz == 0)
&& (request->actual == request->length))
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
|| (is_dma && (!dma->desired_mode ||
(request->actual &
(musb_ep->packet_sz - 1))))
#endif
) {
/*
* On DMA completion, FIFO may not be
* available yet...
*/
if (csr & MUSB_TXCSR_TXPKTRDY)
return;
dev_dbg(musb->controller, "sending zero pkt\n");
musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
| MUSB_TXCSR_TXPKTRDY);
request->zero = 0;
}
if (request->actual == request->length) {
musb_g_giveback(musb_ep, request, 0);
req = musb_ep->desc ? next_request(musb_ep) : NULL;
if (!req) {
dev_dbg(musb->controller, "%s idle now\n",
musb_ep->end_point.name);
return;
}
}
txstate(musb, req);
}
}
/* ------------------------------------------------------------ */
#ifdef CONFIG_USB_INVENTRA_DMA
/* Peripheral rx (OUT) using Mentor DMA works as follows:
- Only mode 0 is used.
- Request is queued by the gadget class driver.
-> if queue was previously empty, rxstate()
- Host sends OUT token which causes an endpoint interrupt
/\ -> RxReady
| -> if request queued, call rxstate
| /\ -> setup DMA
| | -> DMA interrupt on completion
| | -> RxReady
| | -> stop DMA
| | -> ack the read
| | -> if data recd = max expected
| | by the request, or host
| | sent a short packet,
| | complete the request,
| | and start the next one.
| |_____________________________________|
| else just wait for the host
| to send the next OUT token.
|__________________________________________________|
* Non-Mentor DMA engines can of course work differently.
*/
#endif
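/*
 * Illustrative sketch, not part of this file: for the RX flow above, a
 * gadget driver that knows the host will never send a short packet can
 * set short_not_ok on its OUT request; rxstate() below uses that as a
 * hint to try DMA mode 1 for full-packet bursts.  Names are hypothetical.
 *
 *	req->length = 16 * 512;		// multiple of the endpoint packet size
 *	req->short_not_ok = 1;		// a short packet would be an error
 *	usb_ep_queue(out_ep, req, GFP_ATOMIC);
 */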
/*
* Context: controller locked, IRQs blocked, endpoint selected
*/
static void rxstate(struct musb *musb, struct musb_request *req)
{
const u8 epnum = req->epnum;
struct usb_request *request = &req->request;
struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
unsigned fifo_count = 0;
u16 len;
u16 csr = musb_readw(epio, MUSB_RXCSR);
struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
u8 use_mode_1;
if (hw_ep->is_shared_fifo)
musb_ep = &hw_ep->ep_in;
else
musb_ep = &hw_ep->ep_out;
len = musb_ep->packet_sz;
/* We shouldn't get here while DMA is active, but we do... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
dev_dbg(musb->controller, "DMA pending...\n");
return;
}
if (csr & MUSB_RXCSR_P_SENDSTALL) {
dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
musb_ep->end_point.name, csr);
return;
}
if (is_cppi_enabled() && is_buffer_mapped(req)) {
struct dma_controller *c = musb->dma_controller;
struct dma_channel *channel = musb_ep->dma;
/* NOTE: CPPI won't actually stop advancing the DMA
* queue after short packet transfers, so this is almost
* always going to run as IRQ-per-packet DMA so that
* faults will be handled correctly.
*/
if (c->channel_program(channel,
musb_ep->packet_sz,
!request->short_not_ok,
request->dma + request->actual,
request->length - request->actual)) {
/* make sure that if an rxpkt arrived after the irq,
* the cppi engine will be ready to take it as soon
* as DMA is enabled
*/
csr &= ~(MUSB_RXCSR_AUTOCLEAR
| MUSB_RXCSR_DMAMODE);
csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
musb_writew(epio, MUSB_RXCSR, csr);
return;
}
}
if (csr & MUSB_RXCSR_RXPKTRDY) {
len = musb_readw(epio, MUSB_RXCOUNT);
/*
* Enable Mode 1 on RX transfers only when short_not_ok flag
* is set. Currently short_not_ok flag is set only from
* file_storage and f_mass_storage drivers
*/
if (request->short_not_ok && len == musb_ep->packet_sz)
use_mode_1 = 1;
else
use_mode_1 = 0;
if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
if (is_buffer_mapped(req)) {
struct dma_controller *c;
struct dma_channel *channel;
int use_dma = 0;
c = musb->dma_controller;
channel = musb_ep->dma;
/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
* mode 0 only. So we do not get endpoint interrupts due to DMA
* completion. We only get interrupts from DMA controller.
*
* We could operate in DMA mode 1 if we knew the size of the transfer
* in advance. For mass storage class, request->length = what the host
* sends, so that'd work. But for pretty much everything else,
* request->length is routinely more than what the host sends. For
* most of these gadgets, the end is signified either by a short packet,
* or filling the last byte of the buffer. (Sending extra data in
* that last packet should trigger an overflow fault.) But in mode 1,
* we don't get DMA completion interrupt for short packets.
*
* Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
* to get endpoint interrupt on every DMA req, but that didn't seem
* to work reliably.
*
* REVISIT an updated g_file_storage can set req->short_not_ok, which
* then becomes usable as a runtime "use mode 1" hint...
*/
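/* Worked example (illustrative numbers only): with packet_sz = 512
 * and request->length = 32768, mode 1 could move the whole 32 KiB
 * with a single completion interrupt, but a terminating short
 * packet (say 100 bytes) would never raise that interrupt.  Mode 0
 * interrupts after every 512-byte packet, so a short packet is
 * always noticed, at the cost of one IRQ per packet.
 */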
/* Experimental: Mode1 works with mass storage use cases */
if (use_mode_1) {
csr |= MUSB_RXCSR_AUTOCLEAR;
musb_writew(epio, MUSB_RXCSR, csr);
csr |= MUSB_RXCSR_DMAENAB;
musb_writew(epio, MUSB_RXCSR, csr);
/*
* this special sequence (enabling and then
* disabling MUSB_RXCSR_DMAMODE) is required
* to get DMAReq to activate
*/
musb_writew(epio, MUSB_RXCSR,
csr | MUSB_RXCSR_DMAMODE);
musb_writew(epio, MUSB_RXCSR, csr);
} else {
if (!musb_ep->hb_mult &&
musb_ep->hw_ep->rx_double_buffered)
csr |= MUSB_RXCSR_AUTOCLEAR;
csr |= MUSB_RXCSR_DMAENAB;
musb_writew(epio, MUSB_RXCSR, csr);
}
if (request->actual < request->length) {
int transfer_size = 0;
if (use_mode_1) {
transfer_size = min(request->length - request->actual,
channel->max_len);
musb_ep->dma->desired_mode = 1;
} else {
transfer_size = min(request->length - request->actual,
(unsigned)len);
musb_ep->dma->desired_mode = 0;
}
use_dma = c->channel_program(
channel,
musb_ep->packet_sz,
channel->desired_mode,
request->dma
+ request->actual,
transfer_size);
}
if (use_dma)
return;
}
#elif defined(CONFIG_USB_UX500_DMA)
if ((is_buffer_mapped(req)) &&
(request->actual < request->length)) {
struct dma_controller *c;
struct dma_channel *channel;
int transfer_size = 0;
c = musb->dma_controller;
channel = musb_ep->dma;
/* In case first packet is short */
if (len < musb_ep->packet_sz)
transfer_size = len;
else if (request->short_not_ok)
transfer_size = min(request->length -
request->actual,
channel->max_len);
else
transfer_size = min(request->length -
request->actual,
(unsigned)len);
csr &= ~MUSB_RXCSR_DMAMODE;
csr |= (MUSB_RXCSR_DMAENAB |
MUSB_RXCSR_AUTOCLEAR);
musb_writew(epio, MUSB_RXCSR, csr);
if (transfer_size <= musb_ep->packet_sz) {
musb_ep->dma->desired_mode = 0;
} else {
musb_ep->dma->desired_mode = 1;
/* Mode must be set after DMAENAB */
csr |= MUSB_RXCSR_DMAMODE;
musb_writew(epio, MUSB_RXCSR, csr);
}
if (c->channel_program(channel,
musb_ep->packet_sz,
channel->desired_mode,
request->dma
+ request->actual,
transfer_size))
return;
}
#endif /* Mentor's DMA */
fifo_count = request->length - request->actual;
dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
musb_ep->end_point.name,
len, fifo_count,
musb_ep->packet_sz);
fifo_count = min_t(unsigned, len, fifo_count);
#ifdef CONFIG_USB_TUSB_OMAP_DMA
if (tusb_dma_omap() && is_buffer_mapped(req)) {
struct dma_controller *c = musb->dma_controller;
struct dma_channel *channel = musb_ep->dma;
u32 dma_addr = request->dma + request->actual;
int ret;
ret = c->channel_program(channel,
musb_ep->packet_sz,
channel->desired_mode,
dma_addr,
fifo_count);
if (ret)
return;
}
#endif
/*
* Unmap the dma buffer back to cpu if dma channel
* programming fails. This buffer is mapped if the
* channel allocation is successful
*/
if (is_buffer_mapped(req)) {
unmap_dma_buffer(req, musb);
/*
* Clear DMAENAB and AUTOCLEAR for the
* PIO mode transfer
*/
csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
musb_writew(epio, MUSB_RXCSR, csr);
}
musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
(request->buf + request->actual));
request->actual += fifo_count;
/* REVISIT if we left anything in the fifo, flush
* it and report -EOVERFLOW
*/
/* ack the read! */
csr |= MUSB_RXCSR_P_WZC_BITS;
csr &= ~MUSB_RXCSR_RXPKTRDY;
musb_writew(epio, MUSB_RXCSR, csr);
}
}
/* reach the end or short packet detected */
if (request->actual == request->length || len < musb_ep->packet_sz)
musb_g_giveback(musb_ep, request, 0);
}
/*
* Data ready for a request; called from IRQ
*/
void musb_g_rx(struct musb *musb, u8 epnum)
{
u16 csr;
struct musb_request *req;
struct usb_request *request;
void __iomem *mbase = musb->mregs;
struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
struct dma_channel *dma;
struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
if (hw_ep->is_shared_fifo)
musb_ep = &hw_ep->ep_in;
else
musb_ep = &hw_ep->ep_out;
musb_ep_select(mbase, epnum);
req = next_request(musb_ep);
if (!req)
return;
request = &req->request;
csr = musb_readw(epio, MUSB_RXCSR);
dma = is_dma_capable() ? musb_ep->dma : NULL;
dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
csr, dma ? " (dma)" : "", request);
if (csr & MUSB_RXCSR_P_SENTSTALL) {
csr |= MUSB_RXCSR_P_WZC_BITS;
csr &= ~MUSB_RXCSR_P_SENTSTALL;
musb_writew(epio, MUSB_RXCSR, csr);
return;
}
if (csr & MUSB_RXCSR_P_OVERRUN) {
/* csr |= MUSB_RXCSR_P_WZC_BITS; */
csr &= ~MUSB_RXCSR_P_OVERRUN;
musb_writew(epio, MUSB_RXCSR, csr);
dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
if (request->status == -EINPROGRESS)
request->status = -EOVERFLOW;
}
if (csr & MUSB_RXCSR_INCOMPRX) {
/* REVISIT not necessarily an error */
dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
}
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
/* "should not happen"; likely RXPKTRDY pending for DMA */
dev_dbg(musb->controller, "%s busy, csr %04x\n",
}
if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
csr &= ~(MUSB_RXCSR_AUTOCLEAR
| MUSB_RXCSR_DMAENAB
| MUSB_RXCSR_DMAMODE);
musb_writew(epio, MUSB_RXCSR,
MUSB_RXCSR_P_WZC_BITS | csr);
request->actual += musb_ep->dma->actual_len;
dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
epnum, csr,
musb_readw(epio, MUSB_RXCSR),
musb_ep->dma->actual_len, request);
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
defined(CONFIG_USB_UX500_DMA)
/* Autoclear doesn't clear RxPktRdy for short packets */
if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
|| (dma->actual_len
& (musb_ep->packet_sz - 1))) {
/* ack the read! */
csr &= ~MUSB_RXCSR_RXPKTRDY;
musb_writew(epio, MUSB_RXCSR, csr);
}
/* incomplete, and not short? wait for next IN packet */
if ((request->actual < request->length)
&& (musb_ep->dma->actual_len
== musb_ep->packet_sz)) {
/* In double buffer case, continue to unload fifo if
* there is Rx packet in FIFO.
**/
csr = musb_readw(epio, MUSB_RXCSR);
if ((csr & MUSB_RXCSR_RXPKTRDY) &&
hw_ep->rx_double_buffered)
goto exit;
return;
}
#endif
musb_g_giveback(musb_ep, request, 0);
req = next_request(musb_ep);
if (!req)
return;
}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
defined(CONFIG_USB_UX500_DMA)
exit:
#endif
/* Analyze request */
rxstate(musb, req);
}
/* ------------------------------------------------------------ */