tty_flip_buffer_push(tport);
spin_lock(&port->lock);
atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
}
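/*
* Set up the DMA engine channel used for reception: the RX ring
* buffer is mapped once and reused as a cyclic transfer made of two
* half-buffer periods. On any failure we fall back to PIO.
*/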
static int atmel_prepare_rx_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct dma_async_tx_descriptor *desc;
dma_cap_mask_t mask;
struct dma_slave_config config;
struct circ_buf *ring;
int ret, nent;
ring = &atmel_port->rx_ring;
dma_cap_zero(mask);
dma_cap_set(DMA_CYCLIC, mask);
atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx");
if (atmel_port->chan_rx == NULL)
goto chan_err;
dev_info(port->dev, "using %s for rx DMA transfers\n",
dma_chan_name(atmel_port->chan_rx));
spin_lock_init(&atmel_port->lock_rx);
sg_init_table(&atmel_port->sg_rx, 1);
/* UART circular rx buffer is an aligned page. */
BUG_ON(!PAGE_ALIGNED(ring->buf));
sg_set_page(&atmel_port->sg_rx,
virt_to_page(ring->buf),
sizeof(struct atmel_uart_char) * ATMEL_SERIAL_RINGSIZE,
(int)ring->buf & ~PAGE_MASK);
nent = dma_map_sg(port->dev,
&atmel_port->sg_rx,
1,
DMA_FROM_DEVICE);
if (!nent) {
dev_dbg(port->dev, "need to release resource of dma\n");
goto chan_err;
} else {
dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
sg_dma_len(&atmel_port->sg_rx),
ring->buf,
sg_dma_address(&atmel_port->sg_rx));
}
/* Configure the slave DMA */
memset(&config, 0, sizeof(config));
config.direction = DMA_DEV_TO_MEM;
config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
config.src_addr = port->mapbase + ATMEL_US_RHR;
config.src_maxburst = 1;
ret = dmaengine_slave_config(atmel_port->chan_rx,
&config);
if (ret) {
dev_err(port->dev, "DMA rx slave configuration failed\n");
goto chan_err;
}
/*
* Prepare a cyclic dma transfer, assign 2 descriptors,
* each one is half ring buffer size
*/
desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
sg_dma_address(&atmel_port->sg_rx),
sg_dma_len(&atmel_port->sg_rx),
sg_dma_len(&atmel_port->sg_rx)/2,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
desc->callback = atmel_complete_rx_dma;
desc->callback_param = port;
atmel_port->desc_rx = desc;
atmel_port->cookie_rx = dmaengine_submit(desc);
return 0;
chan_err:
dev_err(port->dev, "RX channel not available, switch to pio\n");
atmel_port->use_dma_rx = 0;
if (atmel_port->chan_rx)
atmel_release_rx_dma(port);
return -EINVAL;
}
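/*
* Timer used on IPs without a usable receive-timeout interrupt: it
* periodically kicks the tasklet so buffered RX data gets consumed.
*/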
static void atmel_uart_timer_callback(unsigned long data)
{
struct uart_port *port = (void *)data;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
tasklet_schedule(&atmel_port->tasklet);
mod_timer(&atmel_port->uart_timer, jiffies + uart_poll_timeout(port));
}
/*
* receive interrupt handler.
*/
static void
atmel_handle_receive(struct uart_port *port, unsigned int pending)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_pdc_rx(port)) {
/*
* PDC receive. Just schedule the tasklet and let it
* figure out the details.
*
* TODO: We're not handling error flags correctly at
* the moment.
*/
if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
atmel_uart_writel(port, ATMEL_US_IDR,
(ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
tasklet_schedule(&atmel_port->tasklet);
}
if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
ATMEL_US_FRAME | ATMEL_US_PARE))
atmel_pdc_rxerr(port, pending);
}
if (atmel_use_dma_rx(port)) {
if (pending & ATMEL_US_TIMEOUT) {
atmel_uart_writel(port, ATMEL_US_IDR,
ATMEL_US_TIMEOUT);
tasklet_schedule(&atmel_port->tasklet);
}
}
/* Interrupt receive */
if (pending & ATMEL_US_RXRDY)
atmel_rx_chars(port);
else if (pending & ATMEL_US_RXBRK) {
/*
* End of break detected. If it came along with a
* character, atmel_rx_chars will handle it.
*/
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
atmel_port->break_active = 0;
}
}
/*
* transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
*/
static void
atmel_handle_transmit(struct uart_port *port, unsigned int pending)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (pending & atmel_port->tx_done_mask) {
/* Either PDC or interrupt transmission */
atmel_uart_writel(port, ATMEL_US_IDR,
atmel_port->tx_done_mask);
tasklet_schedule(&atmel_port->tasklet);
}
}
/*
* status flags interrupt handler.
*/
static void
atmel_handle_status(struct uart_port *port, unsigned int pending,
unsigned int status)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
| ATMEL_US_CTSIC)) {
atmel_port->irq_status = status;
atmel_port->status_change = atmel_port->irq_status ^
atmel_port->irq_status_prev;
atmel_port->irq_status_prev = status;
tasklet_schedule(&atmel_port->tasklet);
}
}
/*
* Interrupt handler
*/
static irqreturn_t atmel_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status, pending, mask, pass_counter = 0;
bool gpio_handled = false;
spin_lock(&atmel_port->lock_suspended);
do {
status = atmel_get_lines_status(port);
mask = atmel_uart_readl(port, ATMEL_US_IMR);
pending = status & mask;
if (!gpio_handled) {
/*
* Dealing with GPIO interrupt
*/
if (irq == atmel_port->gpio_irq[UART_GPIO_CTS])
pending |= ATMEL_US_CTSIC;
if (irq == atmel_port->gpio_irq[UART_GPIO_DSR])
pending |= ATMEL_US_DSRIC;
if (irq == atmel_port->gpio_irq[UART_GPIO_RI])
pending |= ATMEL_US_RIIC;
if (irq == atmel_port->gpio_irq[UART_GPIO_DCD])
pending |= ATMEL_US_DCDIC;
gpio_handled = true;
}
if (!pending)
break;
if (atmel_port->suspended) {
atmel_port->pending |= pending;
atmel_port->pending_status = status;
atmel_uart_writel(port, ATMEL_US_IDR, mask);
pm_system_wakeup();
break;
}
atmel_handle_receive(port, pending);
atmel_handle_status(port, pending, status);
atmel_handle_transmit(port, pending);
} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
spin_unlock(&atmel_port->lock_suspended);
return pass_counter ? IRQ_HANDLED : IRQ_NONE;
}
static void atmel_release_tx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
dma_unmap_single(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_TO_DEVICE);
}
/*
* Called from tasklet with ENDTX and TXBUFE interrupts disabled.
*/
static void atmel_tx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct circ_buf *xmit = &port->state->xmit;
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
int count;
/* nothing left to transmit? */
if (atmel_uart_readl(port, ATMEL_PDC_TCR))
return;
xmit->tail += pdc->ofs;
xmit->tail &= UART_XMIT_SIZE - 1;
port->icount.tx += pdc->ofs;
pdc->ofs = 0;
/* more to transmit - setup next transfer */
/* disable PDC transmit */
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
dma_sync_single_for_device(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_TO_DEVICE);
count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
pdc->ofs = count;
atmel_uart_writel(port, ATMEL_PDC_TPR,
pdc->dma_addr + xmit->tail);
atmel_uart_writel(port, ATMEL_PDC_TCR, count);
/* re-enable PDC transmit */
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
/* Enable interrupts */
atmel_uart_writel(port, ATMEL_US_IER,
atmel_port->tx_done_mask);
} else {
if ((port->rs485.flags & SER_RS485_ENABLED) &&
!(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
/* DMA done, stop TX, start RX for RS485 */
atmel_start_rx(port);
}
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
}
static int atmel_prepare_tx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
struct circ_buf *xmit = &port->state->xmit;
pdc->buf = xmit->buf;
pdc->dma_addr = dma_map_single(port->dev,
pdc->buf,
UART_XMIT_SIZE,
DMA_TO_DEVICE);
pdc->dma_size = UART_XMIT_SIZE;
pdc->ofs = 0;
return 0;
}
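/*
* PIO mode: drain the characters that the interrupt handler stored in
* the software ring buffer and hand them to the tty layer.
*/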
static void atmel_rx_from_ring(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct circ_buf *ring = &atmel_port->rx_ring;
unsigned int flg;
unsigned int status;
while (ring->head != ring->tail) {
struct atmel_uart_char c;
/* Make sure c is loaded after head. */
smp_rmb();
c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
port->icount.rx++;
status = c.status;
flg = TTY_NORMAL;
/*
* note that the error handling code is
* out of the main execution path
*/
if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
| ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
if (status & ATMEL_US_RXBRK) {
/* ignore side-effect */
status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
port->icount.brk++;
if (uart_handle_break(port))
continue;
}
if (status & ATMEL_US_PARE)
port->icount.parity++;
if (status & ATMEL_US_FRAME)
port->icount.frame++;
if (status & ATMEL_US_OVRE)
port->icount.overrun++;
status &= port->read_status_mask;
if (status & ATMEL_US_RXBRK)
flg = TTY_BREAK;
else if (status & ATMEL_US_PARE)
flg = TTY_PARITY;
else if (status & ATMEL_US_FRAME)
flg = TTY_FRAME;
}
if (uart_handle_sysrq_char(port, c.ch))
continue;
uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
}
/*
* Drop the lock here since it might end up calling
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
tty_flip_buffer_push(&port->state->port);
spin_lock(&port->lock);
}
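/* Unmap and free both PDC receive buffers. */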
static void atmel_release_rx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int i;
for (i = 0; i < 2; i++) {
struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
dma_unmap_single(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_FROM_DEVICE);
kfree(pdc->buf);
}
}
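/*
* Receive data from the two PDC buffers in a ping-pong scheme: once
* the current buffer is full, the controller switches to the next one
* and the consumed buffer is requeued via the "next" registers.
*/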
static void atmel_rx_from_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct tty_port *tport = &port->state->port;
struct atmel_dma_buffer *pdc;
int rx_idx = atmel_port->pdc_rx_idx;
unsigned int head;
unsigned int tail;
unsigned int count;
do {
/* Reset the UART timeout early so that we don't miss one */
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
pdc = &atmel_port->pdc_rx[rx_idx];
head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
tail = pdc->ofs;
/* If the PDC has switched buffers, RPR won't contain
* any address within the current buffer. Since head
* is unsigned, we just need a one-way comparison to
* find out.
*
* In this case, we just need to consume the entire
* buffer and resubmit it for DMA. This will clear the
* ENDRX bit as well, so that we can safely re-enable
* all interrupts below.
*/
head = min(head, pdc->dma_size);
if (likely(head != tail)) {
dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
pdc->dma_size, DMA_FROM_DEVICE);
/*
* head will only wrap around when we recycle
* the DMA buffer, and when that happens, we
* explicitly set tail to 0. So head will
* always be greater than tail.
*/
count = head - tail;
tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
count);
dma_sync_single_for_device(port->dev, pdc->dma_addr,
pdc->dma_size, DMA_FROM_DEVICE);
port->icount.rx += count;
pdc->ofs = head;
}
/*
* If the current buffer is full, we need to check if
* the next one contains any additional data.
*/
if (head >= pdc->dma_size) {
pdc->ofs = 0;
atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
rx_idx = !rx_idx;
atmel_port->pdc_rx_idx = rx_idx;
}
} while (head >= pdc->dma_size);
/*
* Drop the lock here since it might end up calling
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
tty_flip_buffer_push(tport);
spin_lock(&port->lock);
atmel_uart_writel(port, ATMEL_US_IER,
ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}
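/*
* Allocate and map the two PDC receive buffers, then prime the
* current (RPR/RCR) and next (RNPR/RNCR) pointer/counter registers.
*/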
static int atmel_prepare_rx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int i;
for (i = 0; i < 2; i++) {
struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
if (pdc->buf == NULL) {
if (i != 0) {
dma_unmap_single(port->dev,
atmel_port->pdc_rx[0].dma_addr,
PDC_BUFFER_SIZE,
DMA_FROM_DEVICE);
kfree(atmel_port->pdc_rx[0].buf);
}
atmel_port->use_pdc_rx = 0;
return -ENOMEM;
}
pdc->dma_addr = dma_map_single(port->dev,
pdc->buf,
PDC_BUFFER_SIZE,
DMA_FROM_DEVICE);
pdc->dma_size = PDC_BUFFER_SIZE;
pdc->ofs = 0;
}
atmel_port->pdc_rx_idx = 0;
atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
atmel_uart_writel(port, ATMEL_PDC_RNPR,
atmel_port->pdc_rx[1].dma_addr);
atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
return 0;
}
/*
* tasklet handling tty stuff outside the interrupt handler.
*/
static void atmel_tasklet_func(unsigned long data)
{
struct uart_port *port = (struct uart_port *)data;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status = atmel_port->irq_status;
unsigned int status_change = atmel_port->status_change;
/* The interrupt handler does not take the lock */
spin_lock(&port->lock);
atmel_port->schedule_tx(port);
if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
| ATMEL_US_DCD | ATMEL_US_CTS)) {
/* TODO: All reads to CSR will clear these interrupts! */
if (status_change & ATMEL_US_RI)
port->icount.rng++;
if (status_change & ATMEL_US_DSR)
port->icount.dsr++;
if (status_change & ATMEL_US_DCD)
uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
if (status_change & ATMEL_US_CTS)
uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
wake_up_interruptible(&port->state->port.delta_msr_wait);
atmel_port->status_change = 0;
}
atmel_port->schedule_rx(port);
spin_unlock(&port->lock);
}
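/*
* Pick the transfer mode per direction: the DMA engine when a "dmas"
* DT property is present, the PDC otherwise, and the legacy platform
* data when there is no device tree node.
*/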
static void atmel_init_property(struct atmel_uart_port *atmel_port,
struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
if (np) {
/* DMA/PDC usage specification */
if (of_get_property(np, "atmel,use-dma-rx", NULL)) {
if (of_get_property(np, "dmas", NULL)) {
atmel_port->use_dma_rx = true;
atmel_port->use_pdc_rx = false;
} else {
atmel_port->use_dma_rx = false;
atmel_port->use_pdc_rx = true;
}
} else {
atmel_port->use_dma_rx = false;
atmel_port->use_pdc_rx = false;
}
if (of_get_property(np, "atmel,use-dma-tx", NULL)) {
if (of_get_property(np, "dmas", NULL)) {
atmel_port->use_dma_tx = true;
atmel_port->use_pdc_tx = false;
} else {
atmel_port->use_dma_tx = false;
atmel_port->use_pdc_tx = true;
}
} else {
atmel_port->use_dma_tx = false;
atmel_port->use_pdc_tx = false;
}
} else {
atmel_port->use_pdc_rx = pdata->use_dma_rx;
atmel_port->use_pdc_tx = pdata->use_dma_tx;
atmel_port->use_dma_rx = false;
atmel_port->use_dma_tx = false;
}
}
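/*
* Initialize the RS485 configuration from device tree properties, or
* from the legacy platform data when no DT node is available.
*/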
static void atmel_init_rs485(struct uart_port *port,
struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
if (np) {
u32 rs485_delay[2];
/* rs485 properties */
if (of_property_read_u32_array(np, "rs485-rts-delay",
rs485_delay, 2) == 0) {
struct serial_rs485 *rs485conf = &port->rs485;
rs485conf->delay_rts_before_send = rs485_delay[0];
rs485conf->delay_rts_after_send = rs485_delay[1];
rs485conf->flags = 0;
if (of_get_property(np, "rs485-rx-during-tx", NULL))
rs485conf->flags |= SER_RS485_RX_DURING_TX;
if (of_get_property(np, "linux,rs485-enabled-at-boot-time",
NULL))
rs485conf->flags |= SER_RS485_ENABLED;
}
} else {
port->rs485 = pdata->rs485;
}
}
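/*
* Hook up the prepare/schedule/release callbacks that match the
* chosen transfer mode (DMA engine, PDC or PIO) for each direction.
*/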
static void atmel_set_ops(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_dma_rx(port)) {
atmel_port->prepare_rx = &atmel_prepare_rx_dma;
atmel_port->schedule_rx = &atmel_rx_from_dma;
atmel_port->release_rx = &atmel_release_rx_dma;
} else if (atmel_use_pdc_rx(port)) {
atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
atmel_port->schedule_rx = &atmel_rx_from_pdc;
atmel_port->release_rx = &atmel_release_rx_pdc;
} else {
atmel_port->prepare_rx = NULL;
atmel_port->schedule_rx = &atmel_rx_from_ring;
atmel_port->release_rx = NULL;
}
if (atmel_use_dma_tx(port)) {
atmel_port->prepare_tx = &atmel_prepare_tx_dma;
atmel_port->schedule_tx = &atmel_tx_dma;
atmel_port->release_tx = &atmel_release_tx_dma;
} else if (atmel_use_pdc_tx(port)) {
atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
atmel_port->schedule_tx = &atmel_tx_pdc;
atmel_port->release_tx = &atmel_release_tx_pdc;
} else {
atmel_port->prepare_tx = NULL;
atmel_port->schedule_tx = &atmel_tx_chars;
atmel_port->release_tx = NULL;
}
}
/*
* Get the IP name: USART or UART.
*/
static void atmel_get_ip_name(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int name = atmel_uart_readl(port, ATMEL_US_NAME);
u32 version;
int usart, uart;
/* ASCII "USAR" and "DBGU", as read back from the NAME register */
usart = 0x55534152;
uart = 0x44424755;
atmel_port->is_usart = false;
if (name == usart) {
dev_dbg(port->dev, "This is usart\n");
atmel_port->is_usart = true;
} else if (name == uart) {
dev_dbg(port->dev, "This is uart\n");
atmel_port->is_usart = false;
} else {
/* fallback for older SoCs: use version field */
version = atmel_uart_readl(port, ATMEL_US_VERSION);
switch (version) {
case 0x302:
case 0x10213:
dev_dbg(port->dev, "This version is usart\n");
atmel_port->is_usart = true;
break;
case 0x203:
case 0x10202:
dev_dbg(port->dev, "This version is uart\n");
atmel_port->is_usart = false;
break;
default:
dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
}
}
}
static void atmel_free_gpio_irq(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
enum mctrl_gpio_idx i;
for (i = 0; i < UART_GPIO_MAX; i++)
if (atmel_port->gpio_irq[i] >= 0)
free_irq(atmel_port->gpio_irq[i], port);
}
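/*
* Request an interrupt for each modem-status GPIO line, rolling back
* the ones already acquired if any request fails.
*/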
static int atmel_request_gpio_irq(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int *irq = atmel_port->gpio_irq;
enum mctrl_gpio_idx i;
int err = 0;
for (i = 0; (i < UART_GPIO_MAX) && !err; i++) {
if (irq[i] < 0)
continue;
irq_set_status_flags(irq[i], IRQ_NOAUTOEN);
err = request_irq(irq[i], atmel_interrupt, IRQ_TYPE_EDGE_BOTH,
"atmel_serial", port);
if (err)
dev_err(port->dev, "atmel_startup - Can't get %d irq\n",
irq[i]);
}
/*
* If something went wrong, rollback.
*/
while (err && (--i >= 0))
if (irq[i] >= 0)
free_irq(irq[i], port);
return err;
}
/*
* Perform initialization and enable port for reception
*/
static int atmel_startup(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct tty_struct *tty = port->state->port.tty;
int retval;
/*
* Ensure that no interrupts are enabled otherwise when
* request_irq() is called we could get stuck trying to
* handle an unexpected interrupt
*/
atmel_uart_writel(port, ATMEL_US_IDR, -1);
atmel_port->ms_irq_enabled = false;
/*
* Allocate the IRQ
*/
retval = request_irq(port->irq, atmel_interrupt,
IRQF_SHARED | IRQF_COND_SUSPEND,
tty ? tty->name : "atmel_serial", port);
if (retval) {
dev_err(port->dev, "atmel_startup - Can't get irq\n");
return retval;
}
/*
* Get the GPIO lines IRQ
*/
retval = atmel_request_gpio_irq(port);
if (retval)
goto free_irq;
tasklet_enable(&atmel_port->tasklet);
/*
* Initialize DMA (if necessary)
*/
atmel_init_property(atmel_port, pdev);
atmel_set_ops(port);
if (atmel_port->prepare_rx) {
retval = atmel_port->prepare_rx(port);
if (retval < 0)
atmel_set_ops(port);
}
if (atmel_port->prepare_tx) {
retval = atmel_port->prepare_tx(port);
if (retval < 0)
atmel_set_ops(port);
}
/* Save current CSR for comparison in atmel_tasklet_func() */
atmel_port->irq_status_prev = atmel_get_lines_status(port);
atmel_port->irq_status = atmel_port->irq_status_prev;
/*
* Finally, enable the serial port
*/
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
setup_timer(&atmel_port->uart_timer,
atmel_uart_timer_callback,
(unsigned long)port);
if (atmel_use_pdc_rx(port)) {
/* set UART timeout */
if (!atmel_port->is_usart) {
mod_timer(&atmel_port->uart_timer,
jiffies + uart_poll_timeout(port));
/* set USART timeout */
} else {
atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
atmel_uart_writel(port, ATMEL_US_IER,
ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}
/* enable PDC controller */
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
} else if (atmel_use_dma_rx(port)) {
/* set UART timeout */
if (!atmel_port->is_usart) {
mod_timer(&atmel_port->uart_timer,
jiffies + uart_poll_timeout(port));
/* set USART timeout */
} else {
atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
atmel_uart_writel(port, ATMEL_US_IER,
ATMEL_US_TIMEOUT);
}
} else {
/* enable receive only */
atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
}
return 0;
free_irq:
free_irq(port->irq, port);
return retval;
}
/*
* Flush any TX data submitted for DMA. Called when the TX circular
* buffer is reset.
*/
static void atmel_flush_buffer(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_pdc_tx(port)) {
atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
atmel_port->pdc_tx.ofs = 0;
}
}
/*
* Disable the port
*/
static void atmel_shutdown(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/*
* Prevent any tasklets being scheduled during
* cleanup
*/
del_timer_sync(&atmel_port->uart_timer);
/*
* Clear out any scheduled tasklets before
* we destroy the buffers
*/
tasklet_disable(&atmel_port->tasklet);
tasklet_kill(&atmel_port->tasklet);
/*
* Ensure everything is stopped and
* disable all interrupts, port and break condition.
*/
atmel_stop_rx(port);
atmel_stop_tx(port);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
atmel_uart_writel(port, ATMEL_US_IDR, -1);
if (atmel_port->release_rx)
atmel_port->release_rx(port);
if (atmel_port->release_tx)
atmel_port->release_tx(port);
/*
* Reset ring buffer pointers
*/
atmel_port->rx_ring.head = 0;
atmel_port->rx_ring.tail = 0;
/*
* Free the interrupts
*/
free_irq(port->irq, port);
atmel_free_gpio_irq(port);
atmel_port->ms_irq_enabled = false;
atmel_flush_buffer(port);
}
/*
* Power / Clock management.
*/
static void atmel_serial_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
switch (state) {
case 0:
/*
* Enable the peripheral clock for this serial port.
* This is called on uart_open() or a resume event.
*/
clk_prepare_enable(atmel_port->clk);
/* re-enable interrupts if we disabled some on suspend */
atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
break;
case 3:
/* Back up the interrupt mask and disable all interrupts */
atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
atmel_uart_writel(port, ATMEL_US_IDR, -1);
/*
* Disable the peripheral clock for this serial port.
* This is called on uart_close() or a suspend event.
*/
clk_disable_unprepare(atmel_port->clk);
break;
default:
dev_err(port->dev, "atmel_serial: unknown pm %d\n", state);
}
}
/*
* Change the port parameters
*/
static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
unsigned long flags;
unsigned int old_mode, mode, imr, quot, baud;
/* save the current mode register */
mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
/* reset the mode, clock divisor, parity, stop bits and data size */
mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
ATMEL_US_PAR | ATMEL_US_USMODE);
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
quot = uart_get_divisor(port, baud);
if (quot > 65535) { /* BRGR is 16-bit, so switch to slower clock */
quot /= 8;
mode |= ATMEL_US_USCLKS_MCK_DIV8;
}
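/*
* CD in BRGR is 16 bits wide and, in normal mode, baud = baud rate
* clock / (16 * CD). Hypothetical example: uartclk = 133 MHz at 110
* baud gives CD = 133000000 / (16 * 110) = 75568, which overflows 16
* bits, so the MCK/8 baud rate clock is selected and the divisor is
* scaled down by 8 to match.
*/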
/* byte size */
switch (termios->c_cflag & CSIZE) {
case CS5:
mode |= ATMEL_US_CHRL_5;
break;
case CS6:
mode |= ATMEL_US_CHRL_6;
break;
case CS7:
mode |= ATMEL_US_CHRL_7;
break;
default:
mode |= ATMEL_US_CHRL_8;
break;
}
/* stop bits */