/*
* MUSB OTG driver peripheral support
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include "musb_core.h"
/* MUSB PERIPHERAL status 3-mar-2006:
*
* - EP0 seems solid. It passes both USBCV and usbtest control cases.
* Minor glitches:
*
* + remote wakeup to Linux hosts work, but saw USBCV failures;
* in one test run (operator error?)
* + endpoint halt tests -- in both usbtest and usbcv -- seem
* to break when dma is enabled ... is something wrongly
* clearing SENDSTALL?
*
* - Mass storage behaved ok when last tested. Network traffic patterns
* (with lots of short transfers etc) need retesting; they turn up the
* worst cases of the DMA, since short packets are typical but are not
* required.
*
* - TX/IN
* + both pio and dma behave well with network and g_zero tests
* + no cppi throughput issues other than no-hw-queueing
* + failed with FLAT_REG (DaVinci)
* + seems to behave with double buffering, PIO -and- CPPI
* + with gadgetfs + AIO, requests got lost?
*
* - RX/OUT
* + both pio and dma behave well with network and g_zero tests
* + dma is slow in typical case (short_not_ok is clear)
* + double buffering ok with PIO
* + double buffering *FAILS* with CPPI, wrong data bytes sometimes
* + request lossage observed with gadgetfs
*
* - ISO not tested ... might work, but only weakly isochronous
*
* - Gadget driver disabling of softconnect during bind() is ignored; so
* drivers can't hold off host requests until userspace is ready.
* (Workaround: they can turn it off later.)
*
* - PORTABILITY (assumes PIO works):
* + DaVinci, basically works with cppi dma
* + OMAP 2430, ditto with mentor dma
* + TUSB 6010, platform-specific dma in the works
*/
/* ----------------------------------------------------------------------- */
/*
* Immediately complete a request.
*
* @param ep the endpoint the request was queued on
* @param request the request to complete
* @param status the status to complete the request with
* Context: controller locked, IRQs blocked.
*/
void musb_g_giveback(
struct musb_ep *ep,
struct usb_request *request,
int status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
struct musb_request *req;
struct musb *musb;
int busy = ep->busy;
req = to_musb_request(request);
list_del(&request->list);
if (req->request.status == -EINPROGRESS)
req->request.status = status;
musb = req->musb;
ep->busy = 1;
spin_unlock(&musb->lock);
if (is_dma_capable()) {
if (req->mapped) {
dma_unmap_single(musb->controller,
req->request.dma,
req->request.length,
req->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
req->request.dma = DMA_ADDR_INVALID;
req->mapped = 0;
} else if (req->request.dma != DMA_ADDR_INVALID)
dma_sync_single_for_cpu(musb->controller,
req->request.dma,
req->request.length,
req->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
}
if (request->status == 0)
DBG(5, "%s done request %p, %d/%d\n",
ep->end_point.name, request,
req->request.actual, req->request.length);
else
DBG(2, "%s request %p, %d/%d fault %d\n",
ep->end_point.name, request,
req->request.actual, req->request.length,
request->status);
req->request.complete(&req->ep->end_point, &req->request);
spin_lock(&musb->lock);
ep->busy = busy;
}
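/*
* Example (illustrative sketch, not part of this driver): the
* completion callback invoked by musb_g_giveback() runs with
* musb->lock released, so a gadget driver may requeue from it.
* The names in_ep/my_complete below are hypothetical.
*/
#if 0
static void my_complete(struct usb_ep *in_ep, struct usb_request *req)
{
	/* lock already dropped; req->status is final here */
	if (req->status == 0)
		pr_debug("sent %u of %u bytes\n", req->actual, req->length);
	else if (req->status != -ESHUTDOWN)
		/* requeueing from the callback is allowed */
		usb_ep_queue(in_ep, req, GFP_ATOMIC);
}
#endif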
/* ----------------------------------------------------------------------- */
/*
* Abort requests queued to an endpoint using the status. Synchronous.
* caller locked controller and blocked irqs, and selected this ep.
*/
static void nuke(struct musb_ep *ep, const int status)
{
struct musb_request *req = NULL;
void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
ep->busy = 1;
if (is_dma_capable() && ep->dma) {
struct dma_controller *c = ep->musb->dma_controller;
int value;
if (ep->is_in) {
/*
* The programming guide says that we must not clear
* the DMAMODE bit before DMAENAB, so we only
* clear it in the second write...
*/
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
musb_writew(epio, MUSB_TXCSR,
0 | MUSB_TXCSR_FLUSHFIFO);
} else {
musb_writew(epio, MUSB_RXCSR,
0 | MUSB_RXCSR_FLUSHFIFO);
musb_writew(epio, MUSB_RXCSR,
0 | MUSB_RXCSR_FLUSHFIFO);
}
value = c->channel_abort(ep->dma);
DBG(value ? 1 : 6, "%s: abort DMA --> %d\n", ep->name, value);
c->channel_release(ep->dma);
ep->dma = NULL;
}
while (!list_empty(&(ep->req_list))) {
req = container_of(ep->req_list.next, struct musb_request,
request.list);
musb_g_giveback(ep, &req->request, status);
}
}
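/*
* Example (sketch): a teardown path would call nuke() under the
* controller lock, after selecting the endpoint, so every queued
* request completes with -ESHUTDOWN; flags/epnum here stand for
* hypothetical locals of the caller.
*/
#if 0
	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);
	nuke(musb_ep, -ESHUTDOWN);
	spin_unlock_irqrestore(&musb->lock, flags);
#endif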
/* ----------------------------------------------------------------------- */
/* Data transfers - pure PIO, pure DMA, or mixed mode */
/*
* This assumes the separate CPPI engine is responding to DMA requests
* from the usb core ... sequenced a bit differently from mentor dma.
*/
static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
if (can_bulk_split(musb, ep->type))
return ep->hw_ep->max_packet_sz_tx;
else
return ep->packet_sz;
}
#ifdef CONFIG_USB_INVENTRA_DMA
/* Peripheral tx (IN) using Mentor DMA works as follows:
Only mode 0 is used for transfers <= wPktSize,
mode 1 is used for larger transfers,
One of the following happens:
- Host sends IN token which causes an endpoint interrupt
-> TxAvail
-> if DMA is currently busy, exit.
-> if queue is non-empty, txstate().
- Request is queued by the gadget driver.
-> if queue was previously empty, txstate()
txstate()
-> start
/\ -> setup DMA
| (data is transferred to the FIFO, then sent out when
| IN token(s) are recd from Host.)
| -> DMA interrupt on completion
| calls TxAvail.
| -> stop DMA, ~DMAENAB,
| -> set TxPktRdy for last short pkt or zlp
| -> Complete Request
| -> Continue next request (call txstate)
|___________________________________|
* Non-Mentor DMA engines can of course work differently, such as by
* upleveling from irq-per-packet to irq-per-buffer.
*/
#endif
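/*
* Worked example of the mode rule above: with packet_sz == 512, a
* 3072-byte request selects mode 1 (the controller splits it into six
* 512-byte packets with a single DMA completion), while a 100-byte
* request selects mode 0 (one packet, with TXPKTRDY set by software).
* This matches the desired_mode choice in txstate() below.
*/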
/*
* An endpoint is transmitting data. This can be called either from
* the IRQ routine or from ep.queue() to kickstart a request on an
* endpoint.
*
* Context: controller locked, IRQs blocked, endpoint selected
*/
static void txstate(struct musb *musb, struct musb_request *req)
{
u8 epnum = req->epnum;
struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
struct usb_request *request;
u16 fifo_count = 0, csr;
int use_dma = 0;
musb_ep = req->ep;
/* we shouldn't get here while DMA is active ... but we do ... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
DBG(4, "dma pending...\n");
return;
}
/* read TXCSR before */
csr = musb_readw(epio, MUSB_TXCSR);
request = &req->request;
fifo_count = min(max_ep_writesize(musb, musb_ep),
(int)(request->length - request->actual));
if (csr & MUSB_TXCSR_TXPKTRDY) {
DBG(5, "%s old packet still ready, txcsr %03x\n",
musb_ep->end_point.name, csr);
return;
}
if (csr & MUSB_TXCSR_P_SENDSTALL) {
DBG(5, "%s stalling, txcsr %03x\n",
musb_ep->end_point.name, csr);
return;
}
DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
epnum, musb_ep->packet_sz, fifo_count,
csr);
#ifndef CONFIG_MUSB_PIO_ONLY
if (is_dma_capable() && musb_ep->dma) {
struct dma_controller *c = musb->dma_controller;
size_t request_size;
/* setup DMA, then program endpoint CSR */
request_size = min_t(size_t, request->length - request->actual,
musb_ep->dma->max_len);
use_dma = (request->dma != DMA_ADDR_INVALID);
/* MUSB_TXCSR_P_ISO is still set correctly */
#ifdef CONFIG_USB_INVENTRA_DMA
{
if (request_size < musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;
else
musb_ep->dma->desired_mode = 1;
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
musb_ep->dma->desired_mode,
request->dma + request->actual, request_size);
if (use_dma) {
if (musb_ep->dma->desired_mode == 0) {
/*
* We must not clear the DMAMODE bit
* before the DMAENAB bit -- and the
* latter doesn't always get cleared
* before we get here...
*/
csr &= ~(MUSB_TXCSR_AUTOSET
| MUSB_TXCSR_DMAENAB);
musb_writew(epio, MUSB_TXCSR, csr
| MUSB_TXCSR_P_WZC_BITS);
csr &= ~MUSB_TXCSR_DMAMODE;
csr |= (MUSB_TXCSR_DMAENAB |
MUSB_TXCSR_MODE);
/* against programming guide */
} else {
csr |= (MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_DMAMODE
| MUSB_TXCSR_MODE);
if (!musb_ep->hb_mult)
csr |= MUSB_TXCSR_AUTOSET;
}
musb_writew(epio, MUSB_TXCSR, csr);
}
}
#elif defined(CONFIG_USB_TI_CPPI_DMA)
/* program endpoint CSR first, then setup DMA */
csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
MUSB_TXCSR_MODE;
musb_writew(epio, MUSB_TXCSR,
(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
| csr);
/* ensure writebuffer is empty */
csr = musb_readw(epio, MUSB_TXCSR);
/* NOTE host side sets DMAENAB later than this; both are
* OK since the transfer dma glue (between CPPI and Mentor
* fifos) just tells CPPI it could start. Data only moves
* to the USB TX fifo when both fifos are ready.
*/
/* "mode" is irrelevant here; handle terminating ZLPs like
* PIO does, since the hardware RNDIS mode seems unreliable
* except for the last-packet-is-already-short case.
*/
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
0,
request->dma + request->actual,
request_size);
if (!use_dma) {
c->channel_release(musb_ep->dma);
musb_ep->dma = NULL;
csr &= ~MUSB_TXCSR_DMAENAB;
musb_writew(epio, MUSB_TXCSR, csr);
/* invariant: request->buf is non-null */
}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
request->zero,
request->dma + request->actual,
request_size);
#endif
}
#endif
if (!use_dma) {
musb_write_fifo(musb_ep->hw_ep, fifo_count,
(u8 *) (request->buf + request->actual));
request->actual += fifo_count;
csr |= MUSB_TXCSR_TXPKTRDY;
csr &= ~MUSB_TXCSR_P_UNDERRUN;
musb_writew(epio, MUSB_TXCSR, csr);
}
/* host may already have the data when this message shows... */
DBG(3, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
musb_ep->end_point.name, use_dma ? "dma" : "pio",
request->actual, request->length,
musb_readw(epio, MUSB_TXCSR),
fifo_count,
musb_readw(epio, MUSB_TXMAXP));
}
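/*
* Example (sketch, hypothetical names): txstate() is reached either
* from the TX endpoint IRQ or from ep.queue().  From a gadget driver
* the queue path looks like this:
*/
#if 0
	struct usb_request *req = usb_ep_alloc_request(in_ep, GFP_ATOMIC);

	req->buf = buf;
	req->length = len;
	req->zero = 1;	/* terminating ZLP if len is a multiple of maxpacket */
	req->complete = my_complete;
	/* ep.queue() enters this driver and kicks txstate() if idle */
	usb_ep_queue(in_ep, req, GFP_ATOMIC);
#endif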
/*
* FIFO state update (e.g. data ready).
* Called from IRQ, with controller locked.
*/
void musb_g_tx(struct musb *musb, u8 epnum)
{
u16 csr;
struct usb_request *request;
u8 __iomem *mbase = musb->mregs;
struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
void __iomem *epio = musb->endpoints[epnum].regs;
struct dma_channel *dma;
musb_ep_select(mbase, epnum);
request = next_request(musb_ep);
csr = musb_readw(epio, MUSB_TXCSR);
DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
dma = is_dma_capable() ? musb_ep->dma : NULL;
/*
* REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
* probably rates reporting as a host error.
*/
if (csr & MUSB_TXCSR_P_SENTSTALL) {
csr |= MUSB_TXCSR_P_WZC_BITS;
csr &= ~MUSB_TXCSR_P_SENTSTALL;
musb_writew(epio, MUSB_TXCSR, csr);
return;
}
if (csr & MUSB_TXCSR_P_UNDERRUN) {
/* We NAKed, no big deal... little reason to care. */
csr |= MUSB_TXCSR_P_WZC_BITS;
csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
musb_writew(epio, MUSB_TXCSR, csr);
DBG(20, "underrun on ep%d, req %p\n", epnum, request);
}
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
/*
* SHOULD NOT HAPPEN... has with CPPI though, after
* changing SENDSTALL (and other cases); harmless?
*/
DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
return;
}
if (request) {
u8 is_dma = 0;
if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
is_dma = 1;
csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
MUSB_TXCSR_TXPKTRDY);
/* Ensure writebuffer is empty. */
csr = musb_readw(epio, MUSB_TXCSR);
request->actual += musb_ep->dma->actual_len;
DBG(4, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
epnum, csr, musb_ep->dma->actual_len, request);
/*
* First, maybe a terminating short packet. Some DMA
* engines might handle this by themselves.
*/
if ((request->zero && request->length
&& (request->length % musb_ep->packet_sz == 0)
&& (request->actual == request->length))