static void atmel_rx_from_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct tty_port *tport = &port->state->port;
struct circ_buf *ring = &atmel_port->rx_ring;
struct dma_chan *chan = atmel_port->chan_rx;
struct dma_tx_state state;
enum dma_status dmastat;
size_t count;
/* Reset the UART timeout early so that we don't miss one */
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
dmastat = dmaengine_tx_status(chan,
atmel_port->cookie_rx,
&state);
/* Restart a new tasklet if DMA status is error */
if (dmastat == DMA_ERROR) {
dev_dbg(port->dev, "Get residue error, restart tasklet\n");
atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
tasklet_schedule(&atmel_port->tasklet_rx);
return;
}
/* CPU claims ownership of RX DMA buffer */
dma_sync_sg_for_cpu(port->dev,
&atmel_port->sg_rx,
1,
DMA_FROM_DEVICE);
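/*
 * Note: per the DMA-API, this sync hands buffer ownership to the CPU;
 * on non-coherent systems it invalidates stale cachelines so the CPU
 * reads what the DMA engine actually wrote.
 */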
/*
* ring->head points to the end of data already written by the DMA.
* ring->tail points to the beginning of data to be read by the
* framework.
* The current transfer size should not be larger than the dma buffer
* length.
*/
ring->head = sg_dma_len(&atmel_port->sg_rx) - state.residue;
BUG_ON(ring->head > sg_dma_len(&atmel_port->sg_rx));
/*
* At this point ring->head may point to the first byte right after the
* last byte of the dma buffer:
* 0 <= ring->head <= sg_dma_len(&atmel_port->sg_rx)
*
* However ring->tail must always point inside the dma buffer:
* 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1
*
* Since we use a ring buffer, we have to handle the case
* where head is lower than tail. In such a case, we first read from
* tail to the end of the buffer then reset tail.
*/
if (ring->head < ring->tail) {
count = sg_dma_len(&atmel_port->sg_rx) - ring->tail;
tty_insert_flip_string(tport, ring->buf + ring->tail, count);
ring->tail = 0;
port->icount.rx += count;
}
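/*
 * Example (hypothetical 4096-byte DMA buffer): with tail = 4000 and
 * head = 100, the block above pushes bytes 4000..4095 and resets tail
 * to 0; the block below then pushes bytes 0..99.
 */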
/* Finally we read data from tail to head */
if (ring->tail < ring->head) {
count = ring->head - ring->tail;
tty_insert_flip_string(tport, ring->buf + ring->tail, count);
/* Wrap ring->head if needed */
if (ring->head >= sg_dma_len(&atmel_port->sg_rx))
ring->head = 0;
ring->tail = ring->head;
port->icount.rx += count;
}
/* USART retrieves ownership of RX DMA buffer */
dma_sync_sg_for_device(port->dev,
&atmel_port->sg_rx,
1,
DMA_FROM_DEVICE);
/*
* Drop the lock here since it might end up calling
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
tty_flip_buffer_push(tport);
spin_lock(&port->lock);
atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
}
static int atmel_prepare_rx_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct dma_async_tx_descriptor *desc;
dma_cap_mask_t mask;
struct dma_slave_config config;
struct circ_buf *ring;
int ret, nent;
ring = &atmel_port->rx_ring;
dma_cap_zero(mask);
dma_cap_set(DMA_CYCLIC, mask);
atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx");
if (atmel_port->chan_rx == NULL)
goto chan_err;
dev_info(port->dev, "using %s for rx DMA transfers\n",
dma_chan_name(atmel_port->chan_rx));
spin_lock_init(&atmel_port->lock_rx);
sg_init_table(&atmel_port->sg_rx, 1);
/* UART circular rx buffer is an aligned page. */
BUG_ON(!PAGE_ALIGNED(ring->buf));
sg_set_page(&atmel_port->sg_rx,
virt_to_page(ring->buf),
sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
(unsigned long)ring->buf & ~PAGE_MASK);
nent = dma_map_sg(port->dev,
&atmel_port->sg_rx,
1,
DMA_FROM_DEVICE);
if (!nent) {
dev_dbg(port->dev, "need to release resource of dma\n");
goto chan_err;
} else {
dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__,
sg_dma_len(&atmel_port->sg_rx),
ring->buf,
&sg_dma_address(&atmel_port->sg_rx));
}
/* Configure the slave DMA */
memset(&config, 0, sizeof(config));
config.direction = DMA_DEV_TO_MEM;
config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
config.src_addr = port->mapbase + ATMEL_US_RHR;
config.src_maxburst = 1;
ret = dmaengine_slave_config(atmel_port->chan_rx,
&config);
if (ret) {
dev_err(port->dev, "DMA rx slave configuration failed\n");
goto chan_err;
}
/*
* Prepare a cyclic dma transfer, assign 2 descriptors,
* each one is half ring buffer size
*/
desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
sg_dma_address(&atmel_port->sg_rx),
sg_dma_len(&atmel_port->sg_rx),
sg_dma_len(&atmel_port->sg_rx)/2,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
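/*
 * DMA_PREP_INTERRUPT requests a callback per elapsed period, so
 * atmel_complete_rx_dma() should fire each time half of the ring
 * buffer has been filled.
 */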
if (!desc) {
dev_err(port->dev, "Preparing DMA cyclic failed\n");
goto chan_err;
}
desc->callback = atmel_complete_rx_dma;
desc->callback_param = port;
atmel_port->desc_rx = desc;
atmel_port->cookie_rx = dmaengine_submit(desc);
return 0;
chan_err:
dev_err(port->dev, "RX channel not available, switch to pio\n");
atmel_port->use_dma_rx = 0;
if (atmel_port->chan_rx)
atmel_release_rx_dma(port);
return -EINVAL;
}
static void atmel_uart_timer_callback(unsigned long data)
{
struct uart_port *port = (void *)data;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
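/*
 * Polling fallback used when the IP has no hardware receive timeout
 * (see the !has_hw_timer paths in atmel_startup()): kick the RX
 * tasklet, then re-arm the timer.
 */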
tasklet_schedule(&atmel_port->tasklet_rx);
mod_timer(&atmel_port->uart_timer, jiffies + uart_poll_timeout(port));
}
/*
* receive interrupt handler.
*/
static void
atmel_handle_receive(struct uart_port *port, unsigned int pending)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_pdc_rx(port)) {
/*
* PDC receive. Just schedule the tasklet and let it
* figure out the details.
*
* TODO: We're not handling error flags correctly at
* the moment.
*/
if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
atmel_uart_writel(port, ATMEL_US_IDR,
(ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
tasklet_schedule(&atmel_port->tasklet_rx);
}
if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
ATMEL_US_FRAME | ATMEL_US_PARE))
atmel_pdc_rxerr(port, pending);
}
if (atmel_use_dma_rx(port)) {
if (pending & ATMEL_US_TIMEOUT) {
atmel_uart_writel(port, ATMEL_US_IDR,
ATMEL_US_TIMEOUT);
tasklet_schedule(&atmel_port->tasklet_rx);
}
}
/* Interrupt receive */
if (pending & ATMEL_US_RXRDY)
atmel_rx_chars(port);
else if (pending & ATMEL_US_RXBRK) {
/*
* End of break detected. If it came along with a
* character, atmel_rx_chars will handle it.
*/
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
atmel_port->break_active = 0;
}
}
}
/*
* transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
*/
static void
atmel_handle_transmit(struct uart_port *port, unsigned int pending)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (pending & atmel_port->tx_done_mask) {
/* Either PDC or interrupt transmission */
atmel_uart_writel(port, ATMEL_US_IDR,
atmel_port->tx_done_mask);
tasklet_schedule(&atmel_port->tasklet_tx);
}
}
/*
* status flags interrupt handler.
*/
static void
atmel_handle_status(struct uart_port *port, unsigned int pending,
unsigned int status)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status_change;
if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
| ATMEL_US_CTSIC)) {
status_change = status ^ atmel_port->irq_status_prev;
atmel_port->irq_status_prev = status;
if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
| ATMEL_US_DCD | ATMEL_US_CTS)) {
/* TODO: All reads to CSR will clear these interrupts! */
if (status_change & ATMEL_US_RI)
port->icount.rng++;
if (status_change & ATMEL_US_DSR)
port->icount.dsr++;
if (status_change & ATMEL_US_DCD)
uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
if (status_change & ATMEL_US_CTS)
uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
wake_up_interruptible(&port->state->port.delta_msr_wait);
}
}
}
/*
* Interrupt handler
*/
static irqreturn_t atmel_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status, pending, mask, pass_counter = 0;
spin_lock(&atmel_port->lock_suspended);
do {
status = atmel_get_lines_status(port);
mask = atmel_uart_readl(port, ATMEL_US_IMR);
pending = status & mask;
if (!pending)
break;
if (atmel_port->suspended) {
atmel_port->pending |= pending;
atmel_port->pending_status = status;
atmel_uart_writel(port, ATMEL_US_IDR, mask);
pm_system_wakeup();
break;
}
atmel_handle_receive(port, pending);
atmel_handle_status(port, pending, status);
atmel_handle_transmit(port, pending);
} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
spin_unlock(&atmel_port->lock_suspended);
return pass_counter ? IRQ_HANDLED : IRQ_NONE;
}
static void atmel_release_tx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
dma_unmap_single(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_TO_DEVICE);
}
/*
* Called from tasklet with ENDTX and TXBUFE interrupts disabled.
*/
static void atmel_tx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct circ_buf *xmit = &port->state->xmit;
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
int count;
/* nothing left to transmit? */
if (atmel_uart_readl(port, ATMEL_PDC_TCR))
return;
xmit->tail += pdc->ofs;
xmit->tail &= UART_XMIT_SIZE - 1;
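/* UART_XMIT_SIZE is a power of two, so the mask wraps the circular-buffer index. */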
port->icount.tx += pdc->ofs;
pdc->ofs = 0;
/* more to transmit - setup next transfer */
/* disable PDC transmit */
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
dma_sync_single_for_device(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_TO_DEVICE);
count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
pdc->ofs = count;
atmel_uart_writel(port, ATMEL_PDC_TPR,
pdc->dma_addr + xmit->tail);
atmel_uart_writel(port, ATMEL_PDC_TCR, count);
/* re-enable PDC transmit */
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
/* Enable interrupts */
atmel_uart_writel(port, ATMEL_US_IER,
atmel_port->tx_done_mask);
} else {
if ((port->rs485.flags & SER_RS485_ENABLED) &&
!(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
/* DMA done, stop TX, start RX for RS485 */
atmel_start_rx(port);
}
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
}
static int atmel_prepare_tx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
struct circ_buf *xmit = &port->state->xmit;
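/*
 * Map the whole circular xmit buffer once up front; atmel_tx_pdc()
 * then only points ATMEL_PDC_TPR at dma_addr + xmit->tail for each
 * transfer.
 */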
pdc->buf = xmit->buf;
pdc->dma_addr = dma_map_single(port->dev,
pdc->buf,
UART_XMIT_SIZE,
DMA_TO_DEVICE);
pdc->dma_size = UART_XMIT_SIZE;
pdc->ofs = 0;
return 0;
}
static void atmel_rx_from_ring(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct circ_buf *ring = &atmel_port->rx_ring;
unsigned int flg;
unsigned int status;
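/*
 * PIO path: the ring stores struct atmel_uart_char entries, i.e. the
 * received character together with the status flags captured in
 * interrupt context, so error handling can happen here in the tasklet.
 */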
while (ring->head != ring->tail) {
struct atmel_uart_char c;
/* Make sure c is loaded after head. */
smp_rmb();
c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
port->icount.rx++;
status = c.status;
flg = TTY_NORMAL;
/*
* note that the error handling code is
* out of the main execution path
*/
if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
| ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
if (status & ATMEL_US_RXBRK) {
/* ignore side-effect */
status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
port->icount.brk++;
if (uart_handle_break(port))
continue;
}
if (status & ATMEL_US_PARE)
port->icount.parity++;
if (status & ATMEL_US_FRAME)
port->icount.frame++;
if (status & ATMEL_US_OVRE)
port->icount.overrun++;
status &= port->read_status_mask;
if (status & ATMEL_US_RXBRK)
flg = TTY_BREAK;
else if (status & ATMEL_US_PARE)
flg = TTY_PARITY;
else if (status & ATMEL_US_FRAME)
flg = TTY_FRAME;
}
if (uart_handle_sysrq_char(port, c.ch))
continue;
uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
}
/*
* Drop the lock here since it might end up calling
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
tty_flip_buffer_push(&port->state->port);
spin_lock(&port->lock);
}
static void atmel_release_rx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int i;
for (i = 0; i < 2; i++) {
struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
dma_unmap_single(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_FROM_DEVICE);
kfree(pdc->buf);
}
}
static void atmel_rx_from_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct tty_port *tport = &port->state->port;
struct atmel_dma_buffer *pdc;
int rx_idx = atmel_port->pdc_rx_idx;
unsigned int head;
unsigned int tail;
unsigned int count;
do {
/* Reset the UART timeout early so that we don't miss one */
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
pdc = &atmel_port->pdc_rx[rx_idx];
head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
tail = pdc->ofs;
/* If the PDC has switched buffers, RPR won't contain
* any address within the current buffer. Since head
* is unsigned, we just need a one-way comparison to
* find out.
*
* In this case, we just need to consume the entire
* buffer and resubmit it for DMA. This will clear the
* ENDRX bit as well, so that we can safely re-enable
* all interrupts below.
*/
head = min(head, pdc->dma_size);
if (likely(head != tail)) {
dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
pdc->dma_size, DMA_FROM_DEVICE);
/*
* head will only wrap around when we recycle
* the DMA buffer, and when that happens, we
* explicitly set tail to 0. So head will
* always be greater than tail.
*/
count = head - tail;
tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
count);
dma_sync_single_for_device(port->dev, pdc->dma_addr,
pdc->dma_size, DMA_FROM_DEVICE);
port->icount.rx += count;
pdc->ofs = head;
}
/*
* If the current buffer is full, we need to check if
* the next one contains any additional data.
*/
if (head >= pdc->dma_size) {
pdc->ofs = 0;
atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
rx_idx = !rx_idx;
atmel_port->pdc_rx_idx = rx_idx;
}
} while (head >= pdc->dma_size);
/*
* Drop the lock here since it might end up calling
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
tty_flip_buffer_push(tport);
spin_lock(&port->lock);
atmel_uart_writel(port, ATMEL_US_IER,
ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}
static int atmel_prepare_rx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int i;
for (i = 0; i < 2; i++) {
struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
if (pdc->buf == NULL) {
if (i != 0) {
dma_unmap_single(port->dev,
atmel_port->pdc_rx[0].dma_addr,
PDC_BUFFER_SIZE,
DMA_FROM_DEVICE);
kfree(atmel_port->pdc_rx[0].buf);
}
atmel_port->use_pdc_rx = 0;
return -ENOMEM;
}
pdc->dma_addr = dma_map_single(port->dev,
pdc->buf,
PDC_BUFFER_SIZE,
DMA_FROM_DEVICE);
pdc->dma_size = PDC_BUFFER_SIZE;
pdc->ofs = 0;
}
atmel_port->pdc_rx_idx = 0;
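/*
 * Prime the PDC double buffering: RPR/RCR describe the buffer being
 * filled now, RNPR/RNCR the one the controller switches to once the
 * first is full.
 */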
atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
atmel_uart_writel(port, ATMEL_PDC_RNPR,
atmel_port->pdc_rx[1].dma_addr);
atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
return 0;
}
/*
* tasklet handling tty stuff outside the interrupt handler.
*/
static void atmel_tasklet_rx_func(unsigned long data)
{
struct uart_port *port = (struct uart_port *)data;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/* The interrupt handler does not take the lock */
spin_lock(&port->lock);
atmel_port->schedule_rx(port);
spin_unlock(&port->lock);
}
static void atmel_tasklet_tx_func(unsigned long data)
{
struct uart_port *port = (struct uart_port *)data;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/* The interrupt handler does not take the lock */
spin_lock(&port->lock);
atmel_port->schedule_tx(port);
spin_unlock(&port->lock);
}
static void atmel_init_property(struct atmel_uart_port *atmel_port,
struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
if (np) {
/* DMA/PDC usage specification */
if (of_get_property(np, "atmel,use-dma-rx", NULL)) {
if (of_get_property(np, "dmas", NULL)) {
atmel_port->use_dma_rx = true;
atmel_port->use_pdc_rx = false;
} else {
atmel_port->use_dma_rx = false;
atmel_port->use_pdc_rx = true;
}
} else {
atmel_port->use_dma_rx = false;
atmel_port->use_pdc_rx = false;
}
if (of_get_property(np, "atmel,use-dma-tx", NULL)) {
if (of_get_property(np, "dmas", NULL)) {
atmel_port->use_dma_tx = true;
atmel_port->use_pdc_tx = false;
} else {
atmel_port->use_dma_tx = false;
atmel_port->use_pdc_tx = true;
}
} else {
atmel_port->use_dma_tx = false;
atmel_port->use_pdc_tx = false;
}
} else {
atmel_port->use_pdc_rx = pdata->use_dma_rx;
atmel_port->use_pdc_tx = pdata->use_dma_tx;
atmel_port->use_dma_rx = false;
atmel_port->use_dma_tx = false;
}
}
static void atmel_init_rs485(struct uart_port *port,
struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
struct serial_rs485 *rs485conf = &port->rs485;
u32 rs485_delay[2];
/* rs485 properties */
if (of_property_read_u32_array(np, "rs485-rts-delay",
rs485_delay, 2) == 0) {
rs485conf->delay_rts_before_send = rs485_delay[0];
rs485conf->delay_rts_after_send = rs485_delay[1];
rs485conf->flags = 0;
if (of_get_property(np, "rs485-rx-during-tx", NULL))
rs485conf->flags |= SER_RS485_RX_DURING_TX;
if (of_get_property(np, "linux,rs485-enabled-at-boot-time",
NULL))
rs485conf->flags |= SER_RS485_ENABLED;
} else {
port->rs485 = pdata->rs485;
}
}
static void atmel_set_ops(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_dma_rx(port)) {
atmel_port->prepare_rx = &atmel_prepare_rx_dma;
atmel_port->schedule_rx = &atmel_rx_from_dma;
atmel_port->release_rx = &atmel_release_rx_dma;
} else if (atmel_use_pdc_rx(port)) {
atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
atmel_port->schedule_rx = &atmel_rx_from_pdc;
atmel_port->release_rx = &atmel_release_rx_pdc;
} else {
atmel_port->prepare_rx = NULL;
atmel_port->schedule_rx = &atmel_rx_from_ring;
atmel_port->release_rx = NULL;
}
if (atmel_use_dma_tx(port)) {
atmel_port->prepare_tx = &atmel_prepare_tx_dma;
atmel_port->schedule_tx = &atmel_tx_dma;
atmel_port->release_tx = &atmel_release_tx_dma;
} else if (atmel_use_pdc_tx(port)) {
atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
atmel_port->schedule_tx = &atmel_tx_pdc;
atmel_port->release_tx = &atmel_release_tx_pdc;
} else {
atmel_port->prepare_tx = NULL;
atmel_port->schedule_tx = &atmel_tx_chars;
atmel_port->release_tx = NULL;
}
}
/*
* Get the IP name: usart or uart
*/
static void atmel_get_ip_name(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int name = atmel_uart_readl(port, ATMEL_US_NAME);
u32 version;
u32 usart, dbgu_uart, new_uart;
/* ASCII decoding for IP version */
usart = 0x55534152; /* USAR(T) */
dbgu_uart = 0x44424755; /* DBGU */
new_uart = 0x55415254; /* UART */
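/*
 * The constants are the NAME register contents read back as ASCII,
 * e.g. 0x55415254 = 'U' 'A' 'R' 'T' and 0x55534152 = 'U' 'S' 'A' 'R'.
 */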
atmel_port->has_hw_timer = false;
if (name == new_uart) {
dev_dbg(port->dev, "Uart with hw timer");
atmel_port->has_hw_timer = true;
atmel_port->rtor = ATMEL_UA_RTOR;
} else if (name == usart) {
dev_dbg(port->dev, "Usart\n");
atmel_port->has_hw_timer = true;
atmel_port->rtor = ATMEL_US_RTOR;
} else if (name == dbgu_uart) {
dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
} else {
/* fallback for older SoCs: use version field */
version = atmel_uart_readl(port, ATMEL_US_VERSION);
switch (version) {
case 0x302:
case 0x10213:
dev_dbg(port->dev, "This version is usart\n");
atmel_port->has_hw_timer = true;
atmel_port->rtor = ATMEL_US_RTOR;
break;
case 0x203:
case 0x10202:
dev_dbg(port->dev, "This version is uart\n");
break;
default:
dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
}
}
}
/*
* Perform initialization and enable port for reception
*/
static int atmel_startup(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct tty_struct *tty = port->state->port.tty;
int retval;
/*
* Ensure that no interrupts are enabled otherwise when
* request_irq() is called we could get stuck trying to
* handle an unexpected interrupt
*/
atmel_uart_writel(port, ATMEL_US_IDR, -1);
atmel_port->ms_irq_enabled = false;
/*
* Allocate the IRQ
*/
retval = request_irq(port->irq, atmel_interrupt,
IRQF_SHARED | IRQF_COND_SUSPEND,
tty ? tty->name : "atmel_serial", port);
dev_err(port->dev, "atmel_startup - Can't get irq\n");
return retval;
}
tasklet_enable(&atmel_port->tasklet_rx);
tasklet_enable(&atmel_port->tasklet_tx);
/*
* Initialize DMA (if necessary)
*/
atmel_init_property(atmel_port, pdev);
atmel_set_ops(port);
if (atmel_port->prepare_rx) {
retval = atmel_port->prepare_rx(port);
if (retval < 0)
atmel_set_ops(port);
}
if (atmel_port->prepare_tx) {
retval = atmel_port->prepare_tx(port);
if (retval < 0)
atmel_set_ops(port);
}
/*
* Enable FIFO when available
*/
if (atmel_port->fifo_size) {
unsigned int txrdym = ATMEL_US_ONE_DATA;
unsigned int rxrdym = ATMEL_US_ONE_DATA;
unsigned int fmr;
atmel_uart_writel(port, ATMEL_US_CR,
ATMEL_US_FIFOEN |
ATMEL_US_RXFCLR |
ATMEL_US_TXFLCLR);
if (atmel_use_dma_tx(port))
txrdym = ATMEL_US_FOUR_DATA;
fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
if (atmel_port->rts_high &&
atmel_port->rts_low)
fmr |= ATMEL_US_FRTSC |
ATMEL_US_RXFTHRES(atmel_port->rts_high) |
ATMEL_US_RXFTHRES2(atmel_port->rts_low);
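/*
 * Presumably FRTSC ties RTS to the RX FIFO fill level: deassert RTS
 * once it reaches rts_high, reassert it when it drains below rts_low.
 */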
atmel_uart_writel(port, ATMEL_US_FMR, fmr);
}
/* Save current CSR for comparison in atmel_tasklet_func() */
atmel_port->irq_status_prev = atmel_get_lines_status(port);
/*
* Finally, enable the serial port
*/
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
setup_timer(&atmel_port->uart_timer,
atmel_uart_timer_callback,
(unsigned long)port);
if (atmel_use_pdc_rx(port)) {
if (!atmel_port->has_hw_timer) {
mod_timer(&atmel_port->uart_timer,
jiffies + uart_poll_timeout(port));
/* set USART timeout */
} else {
atmel_uart_writel(port, atmel_port->rtor,
PDC_RX_TIMEOUT);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
atmel_uart_writel(port, ATMEL_US_IER,
ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}
/* enable PDC controller */
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
} else if (atmel_use_dma_rx(port)) {
/* set UART timeout */
if (!atmel_port->has_hw_timer) {
mod_timer(&atmel_port->uart_timer,
jiffies + uart_poll_timeout(port));
/* set USART timeout */
} else {
atmel_uart_writel(port, atmel_port->rtor,
PDC_RX_TIMEOUT);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
atmel_uart_writel(port, ATMEL_US_IER,
ATMEL_US_TIMEOUT);
}
} else {
/* enable receive only */
atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
}
return 0;
}
/*
* Flush any TX data submitted for DMA. Called when the TX circular
* buffer is reset.
*/
static void atmel_flush_buffer(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_pdc_tx(port)) {
atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
atmel_port->pdc_tx.ofs = 0;
}
}
/*
* Disable the port
*/
static void atmel_shutdown(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/*
* Prevent any tasklets being scheduled during
* cleanup
*/
del_timer_sync(&atmel_port->uart_timer);
/*
* Clear out any scheduled tasklets before
* we destroy the buffers
*/
tasklet_disable(&atmel_port->tasklet_rx);
tasklet_disable(&atmel_port->tasklet_tx);
tasklet_kill(&atmel_port->tasklet_rx);
tasklet_kill(&atmel_port->tasklet_tx);
/*
* Ensure everything is stopped and
* disable all interrupts, port and break condition.
*/
atmel_stop_rx(port);
atmel_stop_tx(port);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
atmel_uart_writel(port, ATMEL_US_IDR, -1);
if (atmel_port->release_rx)
atmel_port->release_rx(port);
if (atmel_port->release_tx)
atmel_port->release_tx(port);
/*
* Reset ring buffer pointers
*/
atmel_port->rx_ring.head = 0;
atmel_port->rx_ring.tail = 0;
/*
* Free the interrupts
*/
free_irq(port->irq, port);
atmel_port->ms_irq_enabled = false;
atmel_flush_buffer(port);
}
/*
* Power / Clock management.
*/
static void atmel_serial_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
switch (state) {
case 0:
/*
* Enable the peripheral clock for this serial port.
* This is called on uart_open() or a resume event.
*/
clk_prepare_enable(atmel_port->clk);
/* re-enable interrupts if we disabled some on suspend */
atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
break;
case 3:
/* Back up the interrupt mask and disable all interrupts */
atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
atmel_uart_writel(port, ATMEL_US_IDR, -1);
/*
* Disable the peripheral clock for this serial port.
* This is called on uart_close() or a suspend event.
*/
clk_disable_unprepare(atmel_port->clk);
dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);