/*
* MUSB OTG driver peripheral support
*
* Copyright 2005 Mentor Graphics Corporation
* Copyright (C) 2005-2006 by Texas Instruments
* Copyright (C) 2006-2007 Nokia Corporation
* Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include "musb_core.h"
/* ----------------------------------------------------------------------- */
#define is_buffer_mapped(req) (is_dma_capable() && \
(req->map_state != UN_MAPPED))
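/*
* Buffer-mapping lifecycle, as implemented by map_dma_buffer() and
* unmap_dma_buffer() below (a summary sketch of this file's state
* machine, not authoritative documentation):
*
*   UN_MAPPED --map_dma_buffer()--> MUSB_MAPPED (driver ran dma_map_single)
*   UN_MAPPED --map_dma_buffer()--> PRE_MAPPED  (gadget supplied request.dma)
*   MUSB_MAPPED / PRE_MAPPED --unmap_dma_buffer()--> UN_MAPPED
*/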
/* Maps the buffer to dma */
static inline void map_dma_buffer(struct musb_request *request,
struct musb *musb, struct musb_ep *musb_ep)
{
int compatible = true;
struct dma_controller *dma = musb->dma_controller;
request->map_state = UN_MAPPED;
if (!is_dma_capable() || !musb_ep->dma)
return;
/* Check if DMA engine can handle this request.
* DMA code must reject the USB request explicitly.
* Default behaviour is to map the request.
*/
if (dma->is_compatible)
compatible = dma->is_compatible(musb_ep->dma,
musb_ep->packet_sz, request->request.buf,
request->request.length);
if (!compatible)
return;
if (request->request.dma == DMA_ADDR_INVALID) {
request->request.dma = dma_map_single(
musb->controller,
request->request.buf,
request->request.length,
request->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
request->map_state = MUSB_MAPPED;
} else {
dma_sync_single_for_device(musb->controller,
request->request.dma,
request->request.length,
request->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
request->map_state = PRE_MAPPED;
}
}
/* Unmap the buffer from dma and map it back to cpu */
static inline void unmap_dma_buffer(struct musb_request *request,
struct musb *musb)
{
struct musb_ep *musb_ep = request->ep;
if (!is_buffer_mapped(request) || !musb_ep->dma)
return;
if (request->request.dma == DMA_ADDR_INVALID) {
dev_vdbg(musb->controller,
"not unmapping a never mapped buffer\n");
if (request->map_state == MUSB_MAPPED) {
dma_unmap_single(musb->controller,
request->request.dma,
request->request.length,
request->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
request->request.dma = DMA_ADDR_INVALID;
} else { /* PRE_MAPPED */
dma_sync_single_for_cpu(musb->controller,
request->request.dma,
request->request.length,
request->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
}
request->map_state = UN_MAPPED;
}
/*
* Immediately complete a request.
*
* @param request the request to complete
* @param status the status to complete the request with
* Context: controller locked, IRQs blocked.
*/
void musb_g_giveback(
struct musb_ep *ep,
struct usb_request *request,
int status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
struct musb_request *req;
struct musb *musb;
int busy = ep->busy;
req = to_musb_request(request);
list_del(&req->list);
if (req->request.status == -EINPROGRESS)
req->request.status = status;
musb = req->musb;
ep->busy = 1;
spin_unlock(&musb->lock);
if (!dma_mapping_error(&musb->g.dev, request->dma))
unmap_dma_buffer(req, musb);
dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
ep->end_point.name, request,
req->request.actual, req->request.length);
else
dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
ep->end_point.name, request,
req->request.actual, req->request.length,
request->status);
req->request.complete(&req->ep->end_point, &req->request);
spin_lock(&musb->lock);
ep->busy = busy;
}
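/*
* Note, summarizing the locking above: musb_g_giveback() drops musb->lock
* around the gadget driver's ->complete() callback, so endpoint state may
* change underneath the caller; the IRQ paths below therefore reselect
* the endpoint INDEX register after every giveback.
*/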
/* ----------------------------------------------------------------------- */
/*
* Abort requests queued to an endpoint using the status. Synchronous.
* Caller has locked the controller and blocked IRQs, and selected this ep.
*/
static void nuke(struct musb_ep *ep, const int status)
{
struct musb *musb = ep->musb;
struct musb_request *req = NULL;
void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
ep->busy = 1;
if (is_dma_capable() && ep->dma) {
struct dma_controller *c = ep->musb->dma_controller;
int value;
if (ep->is_in) {
/*
* The programming guide says that we must not clear
* the DMAMODE bit before DMAENAB, so we only
* clear it in the second write...
*/
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
musb_writew(epio, MUSB_TXCSR,
0 | MUSB_TXCSR_FLUSHFIFO);
} else {
musb_writew(epio, MUSB_RXCSR,
0 | MUSB_RXCSR_FLUSHFIFO);
musb_writew(epio, MUSB_RXCSR,
0 | MUSB_RXCSR_FLUSHFIFO);
}
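/*
* (The FLUSHFIFO write is issued twice above; the usual reading is that
* a double-buffered FIFO can hold two packets and each flush empties
* one. That rationale is an assumption, not stated in this file.)
*/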
value = c->channel_abort(ep->dma);
dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
ep->name, value);
c->channel_release(ep->dma);
ep->dma = NULL;
}
while (!list_empty(&ep->req_list)) {
req = list_first_entry(&ep->req_list, struct musb_request, list);
musb_g_giveback(ep, &req->request, status);
}
}
/* ----------------------------------------------------------------------- */
/* Data transfers - pure PIO, pure DMA, or mixed mode */
/*
* This assumes the separate CPPI engine is responding to DMA requests
* from the usb core ... sequenced a bit differently from mentor dma.
*/
static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
if (can_bulk_split(musb, ep->type))
return ep->hw_ep->max_packet_sz_tx;
else
return ep->packet_sz;
}
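/*
* (can_bulk_split() is defined in musb_core.h; the assumption here is the
* usual one: it is true only for bulk endpoints on controllers configured
* for bulk splitting, in which case txstate() may fill the whole hardware
* FIFO, max_packet_sz_tx, instead of a single wMaxPacketSize packet.)
*/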
/*
* An endpoint is transmitting data. This can be called either from
* the IRQ routine or from ep.queue() to kickstart a request on an
* endpoint.
*
* Context: controller locked, IRQs blocked, endpoint selected
*/
static void txstate(struct musb *musb, struct musb_request *req)
{
u8 epnum = req->epnum;
struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
struct usb_request *request;
u16 fifo_count = 0, csr;
int use_dma = 0;
musb_ep = req->ep;
/* Check if EP is disabled */
if (!musb_ep->desc) {
dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
musb_ep->end_point.name);
return;
}
/* we shouldn't get here while DMA is active ... but we do ... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
dev_dbg(musb->controller, "dma pending...\n");
return;
}
/* read TXCSR before */
csr = musb_readw(epio, MUSB_TXCSR);
request = &req->request;
fifo_count = min(max_ep_writesize(musb, musb_ep),
(int)(request->length - request->actual));
if (csr & MUSB_TXCSR_TXPKTRDY) {
dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n",
musb_ep->end_point.name, csr);
return;
}
if (csr & MUSB_TXCSR_P_SENDSTALL) {
dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
musb_ep->end_point.name, csr);
return;
}
dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
epnum, musb_ep->packet_sz, fifo_count,
csr);
#ifndef CONFIG_MUSB_PIO_ONLY
if (is_buffer_mapped(req)) {
struct dma_controller *c = musb->dma_controller;
size_t request_size;
/* setup DMA, then program endpoint CSR */
request_size = min_t(size_t, request->length - request->actual,
musb_ep->dma->max_len);
use_dma = (request->dma != DMA_ADDR_INVALID && request_size);
/* MUSB_TXCSR_P_ISO is still set correctly */
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
{
if (request_size < musb_ep->packet_sz)
musb_ep->dma->desired_mode = 0;
else
musb_ep->dma->desired_mode = 1;
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
musb_ep->dma->desired_mode,
request->dma + request->actual, request_size);
if (use_dma) {
if (musb_ep->dma->desired_mode == 0) {
/*
* We must not clear the DMAMODE bit
* before the DMAENAB bit -- and the
* latter doesn't always get cleared
* before we get here...
*/
csr &= ~(MUSB_TXCSR_AUTOSET
| MUSB_TXCSR_DMAENAB);
musb_writew(epio, MUSB_TXCSR, csr
| MUSB_TXCSR_P_WZC_BITS);
csr &= ~MUSB_TXCSR_DMAMODE;
csr |= (MUSB_TXCSR_DMAENAB |
MUSB_TXCSR_MODE);
/* against programming guide */
} else {
csr |= (MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_DMAMODE
| MUSB_TXCSR_MODE);
/*
* Enable Autoset according to table
* below
* bulk_split hb_mult Autoset_Enable
* 0 0 Yes(Normal)
* 0 >0 No(High BW ISO)
* 1 0 Yes(HS bulk)
* 1 >0 Yes(FS bulk)
*/
if (!musb_ep->hb_mult ||
(musb_ep->hb_mult &&
can_bulk_split(musb,
musb_ep->type)))
csr |= MUSB_TXCSR_AUTOSET;
}
musb_writew(epio, MUSB_TXCSR, csr);
}
}
#elif defined(CONFIG_USB_TI_CPPI_DMA)
/* program endpoint CSR first, then setup DMA */
csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
MUSB_TXCSR_MODE;
musb_writew(epio, MUSB_TXCSR,
(MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
| csr);
/* ensure writebuffer is empty */
csr = musb_readw(epio, MUSB_TXCSR);
/* NOTE host side sets DMAENAB later than this; both are
* OK since the transfer dma glue (between CPPI and Mentor
* fifos) just tells CPPI it could start. Data only moves
* to the USB TX fifo when both fifos are ready.
*/
/* "mode" is irrelevant here; handle terminating ZLPs like
* PIO does, since the hardware RNDIS mode seems unreliable
* except for the last-packet-is-already-short case.
*/
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
0,
request->dma + request->actual,
request_size);
if (!use_dma) {
c->channel_release(musb_ep->dma);
musb_ep->dma = NULL;
csr &= ~MUSB_TXCSR_DMAENAB;
musb_writew(epio, MUSB_TXCSR, csr);
/* invariant: request->buf is non-null */
}
#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
use_dma = use_dma && c->channel_program(
musb_ep->dma, musb_ep->packet_sz,
request->zero,
request->dma + request->actual,
request_size);
#endif
}
#endif
if (!use_dma) {
/*
* Unmap the dma buffer back to cpu if dma channel
* programming fails
*/
unmap_dma_buffer(req, musb);
musb_write_fifo(musb_ep->hw_ep, fifo_count,
(u8 *) (request->buf + request->actual));
request->actual += fifo_count;
csr |= MUSB_TXCSR_TXPKTRDY;
csr &= ~MUSB_TXCSR_P_UNDERRUN;
musb_writew(epio, MUSB_TXCSR, csr);
}
/* host may already have the data when this message shows... */
dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
musb_ep->end_point.name, use_dma ? "dma" : "pio",
request->actual, request->length,
musb_readw(epio, MUSB_TXCSR),
fifo_count,
musb_readw(epio, MUSB_TXMAXP));
}
/*
* FIFO state update (e.g. data ready).
* Called from IRQ, with controller locked.
*/
void musb_g_tx(struct musb *musb, u8 epnum)
{
u16 csr;
struct musb_request *req;
struct usb_request *request;
u8 __iomem *mbase = musb->mregs;
struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
void __iomem *epio = musb->endpoints[epnum].regs;
struct dma_channel *dma;
musb_ep_select(mbase, epnum);
req = next_request(musb_ep);
request = &req->request;
dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
dma = is_dma_capable() ? musb_ep->dma : NULL;
/*
* REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
* probably rates reporting as a host error.
*/
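/*
* MUSB_TXCSR_P_WZC_BITS collects the TXCSR bits with write-zero-to-clear
* semantics: writing 1 leaves them untouched, so ORing the mask into a
* read-modify-write value (as below) avoids clearing them by accident.
*/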
if (csr & MUSB_TXCSR_P_SENTSTALL) {
csr |= MUSB_TXCSR_P_WZC_BITS;
csr &= ~MUSB_TXCSR_P_SENTSTALL;
musb_writew(epio, MUSB_TXCSR, csr);
return;
}
if (csr & MUSB_TXCSR_P_UNDERRUN) {
/* We NAKed, no big deal... little reason to care. */
csr |= MUSB_TXCSR_P_WZC_BITS;
csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
musb_writew(epio, MUSB_TXCSR, csr);
dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
epnum, request);
}
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
/*
* SHOULD NOT HAPPEN... has with CPPI though, after
* changing SENDSTALL (and other cases); harmless?
*/
dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
return;
}
if (request) {
u8 is_dma = 0;
if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
is_dma = 1;
csr |= MUSB_TXCSR_P_WZC_BITS;
csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
musb_writew(epio, MUSB_TXCSR, csr);
/* Ensure writebuffer is empty. */
csr = musb_readw(epio, MUSB_TXCSR);
request->actual += musb_ep->dma->actual_len;
dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
epnum, csr, musb_ep->dma->actual_len, request);
}
/*
* First, maybe a terminating short packet. Some DMA
* engines might handle this by themselves.
*/
if ((request->zero && request->length
&& (request->length % musb_ep->packet_sz == 0)
&& (request->actual == request->length))
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
|| (is_dma && (!dma->desired_mode ||
(request->actual &
(musb_ep->packet_sz - 1))))
#endif
) {
/*
* On DMA completion, FIFO may not be
* available yet...
*/
if (csr & MUSB_TXCSR_TXPKTRDY)
return;
dev_dbg(musb->controller, "sending zero pkt\n");
musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
| MUSB_TXCSR_TXPKTRDY);
request->zero = 0;
}
if (request->actual == request->length) {
musb_g_giveback(musb_ep, request, 0);
/*
* In the giveback function the MUSB lock is
* released and acquired after sometime. During
* this time period the INDEX register could get
* changed by the gadget_queue function especially
* on SMP systems. Reselect the INDEX to be sure
* we are reading/modifying the right registers
*/
musb_ep_select(mbase, epnum);
req = musb_ep->desc ? next_request(musb_ep) : NULL;
if (!req) {
dev_dbg(musb->controller, "%s idle now\n",
musb_ep->end_point.name);
return;
}
}

txstate(musb, req);
}
}
/* ------------------------------------------------------------ */
/*
* Context: controller locked, IRQs blocked, endpoint selected
*/
static void rxstate(struct musb *musb, struct musb_request *req)
{
const u8 epnum = req->epnum;
struct usb_request *request = &req->request;
struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
unsigned len = 0;
u16 fifo_count;
u16 csr = musb_readw(epio, MUSB_RXCSR);
struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
u8 use_mode_1;
if (hw_ep->is_shared_fifo)
musb_ep = &hw_ep->ep_in;
else
musb_ep = &hw_ep->ep_out;
fifo_count = musb_ep->packet_sz;
/* Check if EP is disabled */
if (!musb_ep->desc) {
dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
musb_ep->end_point.name);
return;
}
/* We shouldn't get here while DMA is active, but we do... */
if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
dev_dbg(musb->controller, "DMA pending...\n");
return;
}
if (csr & MUSB_RXCSR_P_SENDSTALL) {
dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
musb_ep->end_point.name, csr);
return;
}
if (is_cppi_enabled() && is_buffer_mapped(req)) {
struct dma_controller *c = musb->dma_controller;
struct dma_channel *channel = musb_ep->dma;
/* NOTE: CPPI won't actually stop advancing the DMA
* queue after short packet transfers, so this is almost
* always going to run as IRQ-per-packet DMA so that
* faults will be handled correctly.
*/
if (c->channel_program(channel,
musb_ep->packet_sz,
!request->short_not_ok,
request->dma + request->actual,
request->length - request->actual)) {
/* make sure that if an rxpkt arrived after the irq,
* the cppi engine will be ready to take it as soon
* as DMA is enabled
*/
csr &= ~(MUSB_RXCSR_AUTOCLEAR
| MUSB_RXCSR_DMAMODE);
csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
musb_writew(epio, MUSB_RXCSR, csr);
return;
}
}
if (csr & MUSB_RXCSR_RXPKTRDY) {
fifo_count = musb_readw(epio, MUSB_RXCOUNT);
/*
* Enable Mode 1 on RX transfers only when short_not_ok flag
* is set. Currently short_not_ok flag is set only from
* file_storage and f_mass_storage drivers
*/
if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
use_mode_1 = 1;
else
use_mode_1 = 0;
if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
if (is_buffer_mapped(req)) {
struct dma_controller *c;
struct dma_channel *channel;
int use_dma = 0;
unsigned int transfer_size;
c = musb->dma_controller;
channel = musb_ep->dma;
/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
* mode 0 only. So we do not get endpoint interrupts due to DMA
* completion. We only get interrupts from DMA controller.
*
* We could operate in DMA mode 1 if we knew the size of the transfer
* in advance. For mass storage class, request->length = what the host
* sends, so that'd work. But for pretty much everything else,
* request->length is routinely more than what the host sends. For
* most of these gadgets, the end is signified either by a short packet,
* or by filling the last byte of the buffer. (Sending extra data in
* that last packet should trigger an overflow fault.) But in mode 1,
* we don't get DMA completion interrupt for short packets.
*
* Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
* to get endpoint interrupt on every DMA req, but that didn't seem
* to work reliably.
*
* REVISIT an updated g_file_storage can set req->short_not_ok, which
* then becomes usable as a runtime "use mode 1" hint...
*/
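/* Worked example of the mode 0 / mode 1 trade-off (hypothetical numbers):
* with packet_sz = 512 and request->length = 4096, a host that sends
* 1024 + 13 bytes ends the transfer with a 13-byte short packet. Mode 0
* interrupts per packet, so the short packet is seen and the request
* completes; mode 1 raises no DMA completion for the short packet, and
* the request would hang until some other event intervened.
*/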
/* Experimental: Mode1 works with mass storage use cases */
if (use_mode_1) {
csr |= MUSB_RXCSR_AUTOCLEAR;
musb_writew(epio, MUSB_RXCSR, csr);
csr |= MUSB_RXCSR_DMAENAB;
musb_writew(epio, MUSB_RXCSR, csr);
/*
* this special sequence (enabling and then
* disabling MUSB_RXCSR_DMAMODE) is required
* to get DMAReq to activate
*/
musb_writew(epio, MUSB_RXCSR,
csr | MUSB_RXCSR_DMAMODE);
musb_writew(epio, MUSB_RXCSR, csr);
transfer_size = min_t(unsigned int,
request->length -
request->actual,
channel->max_len);
musb_ep->dma->desired_mode = 1;
} else {
if (!musb_ep->hb_mult &&
musb_ep->hw_ep->rx_double_buffered)
csr |= MUSB_RXCSR_AUTOCLEAR;
csr |= MUSB_RXCSR_DMAENAB;
musb_writew(epio, MUSB_RXCSR, csr);
transfer_size = min(request->length - request->actual,
(unsigned)fifo_count);
musb_ep->dma->desired_mode = 0;
}

use_dma = c->channel_program(
channel,
musb_ep->packet_sz,
channel->desired_mode,
request->dma
+ request->actual,
transfer_size);
if (use_dma)
return;
}
#elif defined(CONFIG_USB_UX500_DMA)
if ((is_buffer_mapped(req)) &&
(request->actual < request->length)) {
struct dma_controller *c;
struct dma_channel *channel;
unsigned int transfer_size = 0;
c = musb->dma_controller;
channel = musb_ep->dma;
/* In case first packet is short */
if (fifo_count < musb_ep->packet_sz)
transfer_size = fifo_count;
else if (request->short_not_ok)
transfer_size = min_t(unsigned int,
request->length -
request->actual,
channel->max_len);
else
transfer_size = min_t(unsigned int,
request->length -
request->actual,
(unsigned)fifo_count);
csr &= ~MUSB_RXCSR_DMAMODE;
csr |= (MUSB_RXCSR_DMAENAB |
MUSB_RXCSR_AUTOCLEAR);
musb_writew(epio, MUSB_RXCSR, csr);
if (transfer_size <= musb_ep->packet_sz) {
musb_ep->dma->desired_mode = 0;
} else {
musb_ep->dma->desired_mode = 1;
/* Mode must be set after DMAENAB */
csr |= MUSB_RXCSR_DMAMODE;
musb_writew(epio, MUSB_RXCSR, csr);
}
if (c->channel_program(channel,
musb_ep->packet_sz,
channel->desired_mode,
request->dma
+ request->actual,
transfer_size))
return;
}
#endif /* Mentor's DMA */
len = request->length - request->actual;
dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
musb_ep->end_point.name,
fifo_count, len,
musb_ep->packet_sz);

fifo_count = min_t(unsigned, len, fifo_count);

#ifdef CONFIG_USB_TUSB_OMAP_DMA
if (tusb_dma_omap() && is_buffer_mapped(req)) {
struct dma_controller *c = musb->dma_controller;
struct dma_channel *channel = musb_ep->dma;
u32 dma_addr = request->dma + request->actual;
int ret;
ret = c->channel_program(channel,
musb_ep->packet_sz,
channel->desired_mode,
dma_addr,
fifo_count);
if (ret)
return;
}
#endif
/*
* Unmap the dma buffer back to cpu if dma channel
* programming fails. This buffer is mapped if the
* channel allocation is successful
*/
if (is_buffer_mapped(req)) {
unmap_dma_buffer(req, musb);
/*
* Clear DMAENAB and AUTOCLEAR for the
* PIO mode transfer
*/
csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
musb_writew(epio, MUSB_RXCSR, csr);
}
musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
(request->buf + request->actual));
request->actual += fifo_count;
/* REVISIT if we left anything in the fifo, flush
* it and report -EOVERFLOW
*/
/* ack the read! */
csr |= MUSB_RXCSR_P_WZC_BITS;
csr &= ~MUSB_RXCSR_RXPKTRDY;
musb_writew(epio, MUSB_RXCSR, csr);
}
}
/* reach the end or short packet detected */
if (request->actual == request->length ||
fifo_count < musb_ep->packet_sz)
musb_g_giveback(musb_ep, request, 0);
}
/*
* Data ready for a request; called from IRQ
*/
void musb_g_rx(struct musb *musb, u8 epnum)
{
u16 csr;
struct musb_request *req;
struct usb_request *request;
void __iomem *mbase = musb->mregs;
struct musb_ep *musb_ep;
void __iomem *epio = musb->endpoints[epnum].regs;
struct dma_channel *dma;
struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
if (hw_ep->is_shared_fifo)
musb_ep = &hw_ep->ep_in;
else
musb_ep = &hw_ep->ep_out;
req = next_request(musb_ep);
if (!req)
return;

request = &req->request;
csr = musb_readw(epio, MUSB_RXCSR);
dma = is_dma_capable() ? musb_ep->dma : NULL;
dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
csr, dma ? " (dma)" : "", request);
if (csr & MUSB_RXCSR_P_SENTSTALL) {
csr |= MUSB_RXCSR_P_WZC_BITS;
csr &= ~MUSB_RXCSR_P_SENTSTALL;
musb_writew(epio, MUSB_RXCSR, csr);
}
if (csr & MUSB_RXCSR_P_OVERRUN) {
/* csr |= MUSB_RXCSR_P_WZC_BITS; */
csr &= ~MUSB_RXCSR_P_OVERRUN;
musb_writew(epio, MUSB_RXCSR, csr);
dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
if (request->status == -EINPROGRESS)
request->status = -EOVERFLOW;
}
if (csr & MUSB_RXCSR_INCOMPRX) {
/* REVISIT not necessarily an error */
dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
}
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
/* "should not happen"; likely RXPKTRDY pending for DMA */
dev_dbg(musb->controller, "%s busy, csr %04x\n",
}
if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
csr &= ~(MUSB_RXCSR_AUTOCLEAR
| MUSB_RXCSR_DMAENAB
| MUSB_RXCSR_DMAMODE);
musb_writew(epio, MUSB_RXCSR,
MUSB_RXCSR_P_WZC_BITS | csr);
request->actual += musb_ep->dma->actual_len;
dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
epnum, csr,
musb_readw(epio, MUSB_RXCSR),
musb_ep->dma->actual_len, request);
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
defined(CONFIG_USB_UX500_DMA)
/* Autoclear doesn't clear RxPktRdy for short packets */
if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
|| (dma->actual_len
& (musb_ep->packet_sz - 1))) {
/* ack the read! */
csr &= ~MUSB_RXCSR_RXPKTRDY;
musb_writew(epio, MUSB_RXCSR, csr);
}
/* incomplete, and not short? wait for next IN packet */
if ((request->actual < request->length)
&& (musb_ep->dma->actual_len
== musb_ep->packet_sz)) {
/* In double buffer case, continue to unload fifo if
* there is Rx packet in FIFO.
*/
csr = musb_readw(epio, MUSB_RXCSR);
if ((csr & MUSB_RXCSR_RXPKTRDY) &&
hw_ep->rx_double_buffered)
goto exit;
return;
}
#endif
musb_g_giveback(musb_ep, request, 0);
/*
* In the giveback function the MUSB lock is
* released and acquired after sometime. During
* this time period the INDEX register could get
* changed by the gadget_queue function especially
* on SMP systems. Reselect the INDEX to be sure
* we are reading/modifying the right registers
*/
musb_ep_select(mbase, epnum);
req = next_request(musb_ep);
if (!req)
return;
}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
defined(CONFIG_USB_UX500_DMA)
exit:
#endif
/* Analyze request */
rxstate(musb, req);
}
/* ------------------------------------------------------------ */
static int musb_gadget_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
{
unsigned long flags;
struct musb_ep *musb_ep;
struct musb_hw_ep *hw_ep;
void __iomem *regs;
struct musb *musb;
void __iomem *mbase;
u8 epnum;
u16 csr;
unsigned tmp;
int status = -EINVAL;
if (!ep || !desc)
return -EINVAL;
musb_ep = to_musb_ep(ep);
hw_ep = musb_ep->hw_ep;
regs = hw_ep->regs;
musb = musb_ep->musb;
mbase = musb->mregs;
epnum = musb_ep->current_epnum;
spin_lock_irqsave(&musb->lock, flags);
if (musb_ep->desc) {
status = -EBUSY;
goto fail;
}
musb_ep->type = usb_endpoint_type(desc);
/* check direction and (later) maxpacket size against endpoint */
if (usb_endpoint_num(desc) != epnum)
goto fail;
/* REVISIT this rules out high bandwidth periodic transfers */
tmp = usb_endpoint_maxp(desc);
if (tmp & ~0x07ff) {
int ok;
if (usb_endpoint_dir_in(desc))
ok = musb->hb_iso_tx;
else
ok = musb->hb_iso_rx;
if (!ok) {
dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
goto fail;
}
musb_ep->hb_mult = (tmp >> 11) & 3;
} else {
musb_ep->hb_mult = 0;
}
musb_ep->packet_sz = tmp & 0x7ff;
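/*
* wMaxPacketSize layout per USB 2.0: bits 10:0 are the packet size and,
* for high-speed periodic endpoints, bits 12:11 are the number of extra
* transactions per microframe, so hb_mult + 1 transactions can move up to
* packet_sz * (hb_mult + 1) bytes per microframe (computed just below).
*/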
tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
/* enable the interrupts for the endpoint, set the endpoint