/*
* transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
*/
static void
atmel_handle_transmit(struct uart_port *port, unsigned int pending)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
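/*
 * Only acknowledge TX completion here: mask the TX-done sources and
 * defer the actual refill/cleanup to the tasklet, which re-enables
 * them once the next transfer has been set up.
 */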
if (pending & atmel_port->tx_done_mask) {
/* Either PDC or interrupt transmission */
UART_PUT_IDR(port, atmel_port->tx_done_mask);
tasklet_schedule(&atmel_port->tasklet);
}
}
/*
* status flags interrupt handler.
*/
static void
atmel_handle_status(struct uart_port *port, unsigned int pending,
unsigned int status)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
| ATMEL_US_CTSIC)) {
atmel_port->irq_status = status;
tasklet_schedule(&atmel_port->tasklet);
}
}
/*
* Interrupt handler
*/
static irqreturn_t atmel_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
unsigned int status, pending, pass_counter = 0;
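/*
 * Loop as long as enabled interrupt sources remain pending, but give up
 * after ATMEL_ISR_PASS_LIMIT passes so a stuck source cannot keep us in
 * the handler forever.
 */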
do {
status = UART_GET_CSR(port);
pending = status & UART_GET_IMR(port);
if (!pending)
break;
atmel_handle_receive(port, pending);
atmel_handle_status(port, pending, status);
atmel_handle_transmit(port, pending);
} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
return pass_counter ? IRQ_HANDLED : IRQ_NONE;
}
static void atmel_release_tx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
dma_unmap_single(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_TO_DEVICE);
}
/*
* Called from tasklet with ENDTX and TXBUFE interrupts disabled.
*/
static void atmel_tx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct circ_buf *xmit = &port->state->xmit;
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
int count;
/* nothing left to transmit? */
if (UART_GET_TCR(port))
return;
xmit->tail += pdc->ofs;
xmit->tail &= UART_XMIT_SIZE - 1;
port->icount.tx += pdc->ofs;
pdc->ofs = 0;
/* more to transmit - setup next transfer */
/* disable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
dma_sync_single_for_device(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_TO_DEVICE);
count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
pdc->ofs = count;
UART_PUT_TPR(port, pdc->dma_addr + xmit->tail);
UART_PUT_TCR(port, count);
/* re-enable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
} else {
if ((atmel_port->rs485.flags & SER_RS485_ENABLED) &&
!(atmel_port->rs485.flags & SER_RS485_RX_DURING_TX)) {
/* DMA done, stop TX, start RX for RS485 */
atmel_start_rx(port);
}
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
}
static int atmel_prepare_tx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
struct circ_buf *xmit = &port->state->xmit;
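/*
 * The whole circular TX buffer is mapped once here; atmel_tx_pdc()
 * later points TPR at dma_addr + xmit->tail for each chunk, so no
 * per-transfer mapping is required.
 */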
pdc->buf = xmit->buf;
pdc->dma_addr = dma_map_single(port->dev,
pdc->buf,
UART_XMIT_SIZE,
DMA_TO_DEVICE);
pdc->dma_size = UART_XMIT_SIZE;
pdc->ofs = 0;
return 0;
}
static void atmel_rx_from_ring(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct circ_buf *ring = &atmel_port->rx_ring;
unsigned int flg;
unsigned int status;
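/*
 * Drain characters that the receive interrupt path queued in the
 * software ring; they are pushed to the tty layer in one go after the
 * loop, with the port lock dropped around the push.
 */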
while (ring->head != ring->tail) {
struct atmel_uart_char c;
/* Make sure c is loaded after head. */
smp_rmb();
c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
port->icount.rx++;
status = c.status;
flg = TTY_NORMAL;
/*
* note that the error handling code is
* out of the main execution path
*/
if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
| ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
if (status & ATMEL_US_RXBRK) {
/* ignore side-effect */
status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
port->icount.brk++;
if (uart_handle_break(port))
continue;
}
if (status & ATMEL_US_PARE)
port->icount.parity++;
if (status & ATMEL_US_FRAME)
port->icount.frame++;
if (status & ATMEL_US_OVRE)
port->icount.overrun++;
status &= port->read_status_mask;
if (status & ATMEL_US_RXBRK)
flg = TTY_BREAK;
else if (status & ATMEL_US_PARE)
flg = TTY_PARITY;
else if (status & ATMEL_US_FRAME)
flg = TTY_FRAME;
}
if (uart_handle_sysrq_char(port, c.ch))
continue;
uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
}
/*
* Drop the lock here since it might end up calling
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
tty_flip_buffer_push(&port->state->port);
spin_lock(&port->lock);
}
static void atmel_release_rx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int i;
for (i = 0; i < 2; i++) {
struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
dma_unmap_single(port->dev,
pdc->dma_addr,
pdc->dma_size,
DMA_FROM_DEVICE);
kfree(pdc->buf);
}
}
static void atmel_rx_from_pdc(struct uart_port *port)
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct tty_port *tport = &port->state->port;
struct atmel_dma_buffer *pdc;
int rx_idx = atmel_port->pdc_rx_idx;
unsigned int head;
unsigned int tail;
unsigned int count;
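/*
 * Reception uses two PDC buffers in ping-pong fashion: pdc_rx_idx
 * selects the buffer the controller is currently filling, and
 * pdc->ofs records how much of it has already been pushed to the tty.
 */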
do {
/* Reset the UART timeout early so that we don't miss one */
UART_PUT_CR(port, ATMEL_US_STTTO);
pdc = &atmel_port->pdc_rx[rx_idx];
head = UART_GET_RPR(port) - pdc->dma_addr;
tail = pdc->ofs;
/* If the PDC has switched buffers, RPR won't contain
* any address within the current buffer. Since head
* is unsigned, we just need a one-way comparison to
* find out.
*
* In this case, we just need to consume the entire
* buffer and resubmit it for DMA. This will clear the
* ENDRX bit as well, so that we can safely re-enable
* all interrupts below.
*/
head = min(head, pdc->dma_size);
if (likely(head != tail)) {
dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
pdc->dma_size, DMA_FROM_DEVICE);
/*
* head will only wrap around when we recycle
* the DMA buffer, and when that happens, we
* explicitly set tail to 0. So head will
* always be greater than tail.
*/
count = head - tail;
tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
count);
dma_sync_single_for_device(port->dev, pdc->dma_addr,
pdc->dma_size, DMA_FROM_DEVICE);
port->icount.rx += count;
pdc->ofs = head;
}
/*
* If the current buffer is full, we need to check if
* the next one contains any additional data.
*/
if (head >= pdc->dma_size) {
pdc->ofs = 0;
UART_PUT_RNPR(port, pdc->dma_addr);
UART_PUT_RNCR(port, pdc->dma_size);
rx_idx = !rx_idx;
atmel_port->pdc_rx_idx = rx_idx;
}
} while (head >= pdc->dma_size);
/*
* Drop the lock here since it might end up calling
* uart_start(), which takes the lock.
*/
spin_unlock(&port->lock);
tty_flip_buffer_push(tport);
spin_lock(&port->lock);
UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}
static int atmel_prepare_rx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
int i;
for (i = 0; i < 2; i++) {
struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
if (pdc->buf == NULL) {
if (i != 0) {
dma_unmap_single(port->dev,
atmel_port->pdc_rx[0].dma_addr,
PDC_BUFFER_SIZE,
DMA_FROM_DEVICE);
kfree(atmel_port->pdc_rx[0].buf);
}
atmel_port->use_pdc_rx = 0;
return -ENOMEM;
}
pdc->dma_addr = dma_map_single(port->dev,
pdc->buf,
PDC_BUFFER_SIZE,
DMA_FROM_DEVICE);
pdc->dma_size = PDC_BUFFER_SIZE;
pdc->ofs = 0;
}
atmel_port->pdc_rx_idx = 0;
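/*
 * Hand the first buffer to the PDC as the current receive buffer
 * (RPR/RCR) and queue the second as the "next" one (RNPR/RNCR), so the
 * controller can switch buffers without dropping incoming data.
 */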
UART_PUT_RPR(port, atmel_port->pdc_rx[0].dma_addr);
UART_PUT_RCR(port, PDC_BUFFER_SIZE);
UART_PUT_RNPR(port, atmel_port->pdc_rx[1].dma_addr);
UART_PUT_RNCR(port, PDC_BUFFER_SIZE);
return 0;
}
/*
* tasklet handling tty stuff outside the interrupt handler.
*/
static void atmel_tasklet_func(unsigned long data)
{
struct uart_port *port = (struct uart_port *)data;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status;
unsigned int status_change;
/* The interrupt handler does not take the lock */
spin_lock(&port->lock);
atmel_port->schedule_tx(port);
status = atmel_port->irq_status;
status_change = status ^ atmel_port->irq_status_prev;
if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
| ATMEL_US_DCD | ATMEL_US_CTS)) {
/* TODO: All reads to CSR will clear these interrupts! */
if (status_change & ATMEL_US_RI)
port->icount.rng++;
if (status_change & ATMEL_US_DSR)
port->icount.dsr++;
if (status_change & ATMEL_US_DCD)
uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
if (status_change & ATMEL_US_CTS)
uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
wake_up_interruptible(&port->state->port.delta_msr_wait);
atmel_port->irq_status_prev = status;
}
atmel_port->schedule_rx(port);
spin_unlock(&port->lock);
}
static void atmel_set_ops(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
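/*
 * Pick the RX and TX back-ends: dedicated DMA controller, on-chip PDC,
 * or plain interrupt-driven PIO when neither is in use.
 */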
if (atmel_use_dma_rx(port)) {
atmel_port->prepare_rx = &atmel_prepare_rx_dma;
atmel_port->schedule_rx = &atmel_rx_from_dma;
atmel_port->release_rx = &atmel_release_rx_dma;
} else if (atmel_use_pdc_rx(port)) {
atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
atmel_port->schedule_rx = &atmel_rx_from_pdc;
atmel_port->release_rx = &atmel_release_rx_pdc;
} else {
atmel_port->prepare_rx = NULL;
atmel_port->schedule_rx = &atmel_rx_from_ring;
atmel_port->release_rx = NULL;
}
if (atmel_use_dma_tx(port)) {
atmel_port->prepare_tx = &atmel_prepare_tx_dma;
atmel_port->schedule_tx = &atmel_tx_dma;
atmel_port->release_tx = &atmel_release_tx_dma;
} else if (atmel_use_pdc_tx(port)) {
atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
atmel_port->schedule_tx = &atmel_tx_pdc;
atmel_port->release_tx = &atmel_release_tx_pdc;
} else {
atmel_port->prepare_tx = NULL;
atmel_port->schedule_tx = &atmel_tx_chars;
atmel_port->release_tx = NULL;
}
}
/*
* Perform initialization and enable port for reception
*/
static int atmel_startup(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct tty_struct *tty = port->state->port.tty;
int retval;
/*
* Ensure that no interrupts are enabled otherwise when
* request_irq() is called we could get stuck trying to
* handle an unexpected interrupt
*/
UART_PUT_IDR(port, -1);
/*
* Allocate the IRQ
*/
retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED,
tty ? tty->name : "atmel_serial", port);
if (retval) {
printk("atmel_serial: atmel_startup - Can't get irq\n");
return retval;
}
/*
* Initialize DMA (if necessary)
*/
if (atmel_port->prepare_rx) {
retval = atmel_port->prepare_rx(port);
if (retval < 0)
atmel_set_ops(port);
}
if (atmel_port->prepare_tx) {
retval = atmel_port->prepare_tx(port);
if (retval < 0)
atmel_set_ops(port);
}
/*
* If there is a specific "open" function (to register
* control line interrupts)
*/
if (atmel_open_hook) {
retval = atmel_open_hook(port);
if (retval) {
free_irq(port->irq, port);
return retval;
}
}
/* Save current CSR for comparison in atmel_tasklet_func() */
atmel_port->irq_status_prev = UART_GET_CSR(port);
atmel_port->irq_status = atmel_port->irq_status_prev;
/*
* Finally, enable the serial port
*/
UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
/* enable xmit & rcvr */
UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
if (atmel_use_pdc_rx(port)) {
/* set UART timeout */
UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
UART_PUT_CR(port, ATMEL_US_STTTO);
UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
/* enable PDC controller */
UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
} else if (atmel_use_dma_rx(port)) {
UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
UART_PUT_CR(port, ATMEL_US_STTTO);
UART_PUT_IER(port, ATMEL_US_TIMEOUT);
} else {
/* enable receive only */
UART_PUT_IER(port, ATMEL_US_RXRDY);
}
return 0;
}
/*
* Disable the port
*/
static void atmel_shutdown(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/*
* Ensure everything is stopped.
*/
atmel_stop_rx(port);
atmel_stop_tx(port);
/*
* Shut-down the DMA.
*/
if (atmel_port->release_rx)
atmel_port->release_rx(port);
if (atmel_port->release_tx)
atmel_port->release_tx(port);
/*
* Disable all interrupts, port and break condition.
*/
UART_PUT_CR(port, ATMEL_US_RSTSTA);
UART_PUT_IDR(port, -1);
/*
* Free the interrupt
*/
free_irq(port->irq, port);
/*
* If there is a specific "close" function (to unregister
* control line interrupts)
*/
if (atmel_close_hook)
atmel_close_hook(port);
}
/*
* Flush any TX data submitted for DMA. Called when the TX circular
* buffer is reset.
*/
static void atmel_flush_buffer(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
if (atmel_use_pdc_tx(port)) {
UART_PUT_TCR(port, 0);
atmel_port->pdc_tx.ofs = 0;
}
}
/*
* Power / Clock management.
*/
static void atmel_serial_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
switch (state) {
case 0:
/*
* Enable the peripheral clock for this serial port.
* This is called on uart_open() or a resume event.
*/
clk_prepare_enable(atmel_port->clk);
/* re-enable interrupts if we disabled some on suspend */
UART_PUT_IER(port, atmel_port->backup_imr);
break;
case 3:
/* Back up the interrupt mask and disable all interrupts */
atmel_port->backup_imr = UART_GET_IMR(port);
UART_PUT_IDR(port, -1);
/*
* Disable the peripheral clock for this serial port.
* This is called on uart_close() or a suspend event.
*/
clk_disable_unprepare(atmel_port->clk);
break;
default:
printk(KERN_ERR "atmel_serial: unknown pm %d\n", state);
}
}
/*
* Change the port parameters
*/
static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
unsigned long flags;
unsigned int mode, imr, quot, baud;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
/* Get current mode register */
mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL
| ATMEL_US_NBSTOP | ATMEL_US_PAR
| ATMEL_US_USMODE);
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
quot = uart_get_divisor(port, baud);
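/*
 * The BRGR clock divisor (CD) is only 16 bits wide; uart_get_divisor()
 * returns uartclk / (16 * baud), so at very low baud rates the value
 * can overflow and the slower MCK/8 input clock is selected instead.
 */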
if (quot > 65535) { /* BRGR is 16-bit, so switch to slower clock */
quot /= 8;
mode |= ATMEL_US_USCLKS_MCK_DIV8;
}
/* byte size */
switch (termios->c_cflag & CSIZE) {
case CS5:
mode |= ATMEL_US_CHRL_5;
break;
case CS6:
mode |= ATMEL_US_CHRL_6;
break;
case CS7:
mode |= ATMEL_US_CHRL_7;
break;
default:
mode |= ATMEL_US_CHRL_8;
break;
}
/* stop bits */
if (termios->c_cflag & CSTOPB)
mode |= ATMEL_US_NBSTOP_2;
/* parity */
if (termios->c_cflag & PARENB) {
/* Mark or Space parity */
if (termios->c_cflag & CMSPAR) {
if (termios->c_cflag & PARODD)
mode |= ATMEL_US_PAR_MARK;
else
mode |= ATMEL_US_PAR_SPACE;
} else if (termios->c_cflag & PARODD)
mode |= ATMEL_US_PAR_ODD;
else
mode |= ATMEL_US_PAR_EVEN;
} else
mode |= ATMEL_US_PAR_NONE;
/* hardware handshake (RTS/CTS) */
if (termios->c_cflag & CRTSCTS)
mode |= ATMEL_US_USMODE_HWHS;
else
mode |= ATMEL_US_USMODE_NORMAL;
spin_lock_irqsave(&port->lock, flags);
port->read_status_mask = ATMEL_US_OVRE;
if (termios->c_iflag & INPCK)
port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
if (termios->c_iflag & (BRKINT | PARMRK))
port->read_status_mask |= ATMEL_US_RXBRK;
if (atmel_use_pdc_rx(port))
/* need to enable error interrupts */
UART_PUT_IER(port, port->read_status_mask);
/*
* Characters to ignore
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= ATMEL_US_RXBRK;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= ATMEL_US_OVRE;
}
/* TODO: Ignore all characters if CREAD is set.*/
/* update the per-port timeout */
uart_update_timeout(port, termios->c_cflag, baud);
/*
* save/disable interrupts. The tty layer will ensure that the
* transmitter is empty if requested by the caller, so there's
* no need to wait for it here.
*/
imr = UART_GET_IMR(port);
UART_PUT_IDR(port, -1);
/* disable receiver and transmitter */
UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
/* Resetting serial mode to RS232 (0x0) */
mode &= ~ATMEL_US_USMODE;
if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
if ((atmel_port->rs485.delay_rts_after_send) > 0)
UART_PUT_TTGR(port,
atmel_port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
}
/* set the parity, stop bits and data size */
UART_PUT_MR(port, mode);
/* set the baud rate */
UART_PUT_BRGR(port, quot);
UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
/* restore interrupts */
UART_PUT_IER(port, imr);
/* CTS flow-control and modem-status interrupts */
if (UART_ENABLE_MS(port, termios->c_cflag))
port->ops->enable_ms(port);
spin_unlock_irqrestore(&port->lock, flags);
}
static void atmel_set_ldisc(struct uart_port *port, int new)
{
if (new == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
atmel_enable_ms(port);
} else {
port->flags &= ~UPF_HARDPPS_CD;
}
}
/*
* Return string describing the specified port
*/
static const char *atmel_type(struct uart_port *port)
{
return (port->type == PORT_ATMEL) ? "ATMEL_SERIAL" : NULL;
}
/*
* Release the memory region(s) being used by 'port'.
*/
static void atmel_release_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
int size = pdev->resource[0].end - pdev->resource[0].start + 1;
release_mem_region(port->mapbase, size);
if (port->flags & UPF_IOREMAP) {
iounmap(port->membase);
port->membase = NULL;
}
}
/*
* Request the memory region(s) being used by 'port'.
*/
static int atmel_request_port(struct uart_port *port)
{
struct platform_device *pdev = to_platform_device(port->dev);
int size = pdev->resource[0].end - pdev->resource[0].start + 1;
if (!request_mem_region(port->mapbase, size, "atmel_serial"))
return -EBUSY;
if (port->flags & UPF_IOREMAP) {
port->membase = ioremap(port->mapbase, size);
if (port->membase == NULL) {
release_mem_region(port->mapbase, size);
return -ENOMEM;
}
}
return 0;
}
/*
* Configure/autoconfigure the port.
*/
static void atmel_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_ATMEL;
atmel_request_port(port);
}
}
/*
* Verify the new serial_struct (for TIOCSSERIAL).
*/
static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
{
int ret = 0;
if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
ret = -EINVAL;
if (port->irq != ser->irq)
ret = -EINVAL;
if (ser->io_type != SERIAL_IO_MEM)
ret = -EINVAL;
if (port->uartclk / 16 != ser->baud_base)
ret = -EINVAL;
if ((void *)port->mapbase != ser->iomem_base)
ret = -EINVAL;
if (port->iobase != ser->port)
ret = -EINVAL;
if (ser->hub6 != 0)
ret = -EINVAL;
return ret;
}
#ifdef CONFIG_CONSOLE_POLL
static int atmel_poll_get_char(struct uart_port *port)
{
while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY))
cpu_relax();
return UART_GET_CHAR(port);
}
static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
{
while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
cpu_relax();
UART_PUT_CHAR(port, ch);
}
#endif
static int
atmel_ioctl(struct uart_port *port, unsigned int cmd, unsigned long arg)
{
struct serial_rs485 rs485conf;
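/* Only the RS485 get/set ioctls are handled here; anything else is
 * passed back to the serial core with -ENOIOCTLCMD. */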
switch (cmd) {
case TIOCSRS485:
if (copy_from_user(&rs485conf, (struct serial_rs485 *) arg,
sizeof(rs485conf)))
return -EFAULT;
atmel_config_rs485(port, &rs485conf);
break;
case TIOCGRS485:
if (copy_to_user((struct serial_rs485 *) arg,
&(to_atmel_uart_port(port)->rs485),
sizeof(rs485conf)))
return -EFAULT;
break;
default:
return -ENOIOCTLCMD;
}
return 0;
}
static struct uart_ops atmel_pops = {
.tx_empty = atmel_tx_empty,
.set_mctrl = atmel_set_mctrl,
.get_mctrl = atmel_get_mctrl,
.stop_tx = atmel_stop_tx,
.start_tx = atmel_start_tx,
.stop_rx = atmel_stop_rx,
.enable_ms = atmel_enable_ms,
.break_ctl = atmel_break_ctl,
.startup = atmel_startup,
.shutdown = atmel_shutdown,
.flush_buffer = atmel_flush_buffer,
.set_termios = atmel_set_termios,
.set_ldisc = atmel_set_ldisc,
.type = atmel_type,
.release_port = atmel_release_port,
.request_port = atmel_request_port,
.config_port = atmel_config_port,
.verify_port = atmel_verify_port,
.pm = atmel_serial_pm,
.ioctl = atmel_ioctl,
#ifdef CONFIG_CONSOLE_POLL
.poll_get_char = atmel_poll_get_char,
.poll_put_char = atmel_poll_put_char,
#endif
};
static void atmel_of_init_port(struct atmel_uart_port *atmel_port,
struct device_node *np)
{
u32 rs485_delay[2];
/* DMA/PDC usage specification */
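/*
 * "atmel,use-dma-rx" asks for hardware-assisted reception; whether that
 * means the DMA controller or the legacy PDC is decided by the presence
 * of a "dmas" property in the node. The TX properties below follow the
 * same pattern.
 */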
if (of_get_property(np, "atmel,use-dma-rx", NULL)) {
if (of_get_property(np, "dmas", NULL)) {
atmel_port->use_dma_rx = true;
atmel_port->use_pdc_rx = false;
} else {
atmel_port->use_dma_rx = false;
atmel_port->use_pdc_rx = true;
}
} else {
atmel_port->use_dma_rx = false;
atmel_port->use_pdc_rx = false;
}
if (of_get_property(np, "atmel,use-dma-tx", NULL)) {
if (of_get_property(np, "dmas", NULL)) {
atmel_port->use_dma_tx = true;
atmel_port->use_pdc_tx = false;
} else {
atmel_port->use_dma_tx = false;
atmel_port->use_pdc_tx = true;
}
} else {
atmel_port->use_dma_tx = false;
atmel_port->use_pdc_tx = false;
}
/* rs485 properties */
if (of_property_read_u32_array(np, "rs485-rts-delay",
rs485_delay, 2) == 0) {
struct serial_rs485 *rs485conf = &atmel_port->rs485;
rs485conf->delay_rts_before_send = rs485_delay[0];
rs485conf->delay_rts_after_send = rs485_delay[1];
rs485conf->flags = 0;
if (of_get_property(np, "rs485-rx-during-tx", NULL))
rs485conf->flags |= SER_RS485_RX_DURING_TX;
if (of_get_property(np, "linux,rs485-enabled-at-boot-time", NULL))
rs485conf->flags |= SER_RS485_ENABLED;
}
}
/*
* Configure the port from the platform device resource info.
*/
static int atmel_init_port(struct atmel_uart_port *atmel_port,
struct platform_device *pdev)
{
int ret;
struct uart_port *port = &atmel_port->uart;
struct atmel_uart_data *pdata = pdev->dev.platform_data;
if (pdev->dev.of_node) {
atmel_of_init_port(atmel_port, pdev->dev.of_node);
} else {
atmel_port->use_pdc_rx = pdata->use_dma_rx;
atmel_port->use_pdc_tx = pdata->use_dma_tx;
atmel_port->rs485 = pdata->rs485;
}
atmel_set_ops(port);
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &atmel_pops;
port->fifosize = 1;
port->dev = &pdev->dev;
port->mapbase = pdev->resource[0].start;
port->irq = pdev->resource[1].start;
tasklet_init(&atmel_port->tasklet, atmel_tasklet_func,
(unsigned long)port);
memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
if (pdata && pdata->regs) {
/* Already mapped by setup code */
port->membase = pdata->regs;
} else {
port->flags |= UPF_IOREMAP;
port->membase = NULL;
}
/* for console, the clock could already be configured */
if (!atmel_port->clk) {
atmel_port->clk = clk_get(&pdev->dev, "usart");
if (IS_ERR(atmel_port->clk)) {
ret = PTR_ERR(atmel_port->clk);
atmel_port->clk = NULL;
return ret;
}
ret = clk_prepare_enable(atmel_port->clk);
if (ret) {
clk_put(atmel_port->clk);
atmel_port->clk = NULL;
return ret;
}
port->uartclk = clk_get_rate(atmel_port->clk);
clk_disable_unprepare(atmel_port->clk);
/* only enable clock when USART is in use */
}
/* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
if (atmel_port->rs485.flags & SER_RS485_ENABLED)
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
else if (atmel_use_pdc_tx(port)) {
atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
} else {
atmel_port->tx_done_mask = ATMEL_US_TXRDY;
}