}
num_command_slots = dd->slot_groups * 32;
hba_setup(dd);
tasklet_init(&dd->tasklet, mtip_tasklet, (unsigned long)dd);
dd->port = kzalloc(sizeof(struct mtip_port), GFP_KERNEL);
if (!dd->port) {
dev_err(&dd->pdev->dev,
"Memory allocation: port structure\n");
return -ENOMEM;
}
/* Counting semaphore to track command slot usage */
sema_init(&dd->port->cmd_slot, num_command_slots - 1);
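/*
* Note: the semaphore starts at one less than the total slot count,
* presumably so that one command slot always remains in reserve for
* driver-internal (non-NCQ) commands.
*/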
/* Spinlock to prevent concurrent issue */
spin_lock_init(&dd->port->cmd_issue_lock);
/* Set the port mmio base address. */
dd->port->mmio = dd->mmio + PORT_OFFSET;
dd->port->dd = dd;
/* Allocate memory for the command list. */
dd->port->command_list =
dmam_alloc_coherent(&dd->pdev->dev,
HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
&dd->port->command_list_dma,
GFP_KERNEL);
if (!dd->port->command_list) {
dev_err(&dd->pdev->dev,
"Memory allocation: command list\n");
rv = -ENOMEM;
goto out1;
}
/* Clear the memory we have allocated. */
memset(dd->port->command_list,
0,
HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4));
/* Setup the address of the RX FIS. */
dd->port->rxfis = dd->port->command_list + HW_CMD_SLOT_SZ;
dd->port->rxfis_dma = dd->port->command_list_dma + HW_CMD_SLOT_SZ;
/* Setup the address of the command tables. */
dd->port->command_table = dd->port->rxfis + AHCI_RX_FIS_SZ;
dd->port->command_tbl_dma = dd->port->rxfis_dma + AHCI_RX_FIS_SZ;
/* Setup the address of the identify data. */
dd->port->identify = dd->port->command_table +
HW_CMD_TBL_AR_SZ;
dd->port->identify_dma = dd->port->command_tbl_dma +
HW_CMD_TBL_AR_SZ;
/* Setup the address of the sector buffer - for some non-ncq cmds */
dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
/* Setup the address of the log buf - for read log command */
dd->port->log_buf = (void *)dd->port->sector_buffer + ATA_SECT_SIZE;
dd->port->log_buf_dma = dd->port->sector_buffer_dma + ATA_SECT_SIZE;
/* Setup the address of the smart buf - for smart read data command */
dd->port->smart_buf = (void *)dd->port->log_buf + ATA_SECT_SIZE;
dd->port->smart_buf_dma = dd->port->log_buf_dma + ATA_SECT_SIZE;
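/*
* At this point the single coherent allocation has been carved up, in
* order, into: command list, RX FIS area, per-slot command tables,
* IDENTIFY buffer, sector buffer, read-log buffer and SMART buffer
* (the last four each one ATA sector in size).
*/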
/* Point the command headers at the command tables. */
for (i = 0; i < num_command_slots; i++) {
dd->port->commands[i].command_header =
dd->port->command_list +
(sizeof(struct mtip_cmd_hdr) * i);
dd->port->commands[i].command_header_dma =
dd->port->command_list_dma +
(sizeof(struct mtip_cmd_hdr) * i);
dd->port->commands[i].command =
dd->port->command_table + (HW_CMD_TBL_SZ * i);
dd->port->commands[i].command_dma =
dd->port->command_tbl_dma + (HW_CMD_TBL_SZ * i);
if (readl(dd->mmio + HOST_CAP) & HOST_CAP_64)
dd->port->commands[i].command_header->ctbau =
__force_bit2int cpu_to_le32(
(dd->port->commands[i].command_dma >> 16) >> 16);
dd->port->commands[i].command_header->ctba =
__force_bit2int cpu_to_le32(
dd->port->commands[i].command_dma & 0xFFFFFFFF);
/*
* If this is not done, the stock FC11 i386 kernel (which has lots of
* kernel debugging enabled) reports a bug.
*/
sg_init_table(dd->port->commands[i].sg, MTIP_MAX_SG);
/* Mark all commands as currently inactive.*/
atomic_set(&dd->port->commands[i].active, 0);
}
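/*
* Each command header now points at its command table: ctba carries the
* low 32 bits of the table's DMA address and, on 64-bit capable HBAs,
* ctbau carries the upper 32 bits (per the AHCI command-header layout).
*/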
/* Setup the pointers to the extended s_active and CI registers. */
for (i = 0; i < dd->slot_groups; i++) {
dd->port->s_active[i] =
dd->port->mmio + i*0x80 + PORT_SCR_ACT;
dd->port->cmd_issue[i] =
dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
dd->port->completed[i] =
dd->port->mmio + i*0x80 + PORT_SDBV;
}
timetaken = jiffies;
timeout = jiffies + msecs_to_jiffies(30000);
while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
time_before(jiffies, timeout)) {
mdelay(100);
}
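/*
* The poll above waits (up to 30 seconds) for the DET field of the
* port's SStatus register to read 3, i.e. device presence detected and
* Phy communication established.
*/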
if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
timetaken = jiffies - timetaken;
dev_warn(&dd->pdev->dev,
"Surprise removal detected at %u ms\n",
jiffies_to_msecs(timetaken));
rv = -ENODEV;
goto out2;
}
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
timetaken = jiffies - timetaken;
dev_warn(&dd->pdev->dev,
"Removal detected at %u ms\n",
jiffies_to_msecs(timetaken));
rv = -EFAULT;
goto out2;
}
/* Conditionally reset the HBA. */
if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
if (mtip_hba_reset(dd) < 0) {
dev_err(&dd->pdev->dev,
"Card did not reset within timeout\n");
rv = -EIO;
goto out2;
}
} else {
/* Clear any pending interrupts on the HBA */
writel(readl(dd->mmio + HOST_IRQ_STAT),
dd->mmio + HOST_IRQ_STAT);
}
mtip_init_port(dd->port);
mtip_start_port(dd->port);
/* Setup the ISR and enable interrupts. */
rv = devm_request_irq(&dd->pdev->dev,
dd->pdev->irq,
mtip_irq_handler,
IRQF_SHARED,
dev_driver_string(&dd->pdev->dev),
dd);
if (rv) {
dev_err(&dd->pdev->dev,
"Unable to allocate IRQ %d\n", dd->pdev->irq);
goto out2;
}
/* Enable interrupts on the HBA. */
writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
dd->mmio + HOST_CTL);
init_timer(&dd->port->cmd_timer);
init_waitqueue_head(&dd->port->svc_wait);
dd->port->cmd_timer.data = (unsigned long int) dd->port;
dd->port->cmd_timer.function = mtip_timeout_function;
mod_timer(&dd->port->cmd_timer,
jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
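/*
* The command timeout timer fires every MTIP_TIMEOUT_CHECK_PERIOD ms so
* that mtip_timeout_function can scan for commands that have exceeded
* their deadline (the handler is assumed to rearm the timer itself).
*/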
if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
rv = -EFAULT;
goto out3;
}
if (mtip_get_identify(dd->port, NULL) < 0) {
rv = -EFAULT;
goto out3;
}
if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
MTIP_FTL_REBUILD_MAGIC) {
set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
return MTIP_FTL_REBUILD_MAGIC;
}
mtip_dump_identify(dd->port);
/* check write protect, over temp and rebuild statuses */
rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
dd->port->log_buf,
dd->port->log_buf_dma, 1);
if (rv) {
dev_warn(&dd->pdev->dev,
"Error in READ LOG EXT (10h) command\n");
/* non-critical error, don't fail the load */
} else {
buf = (unsigned char *)dd->port->log_buf;
if (buf[259] & 0x1) {
dev_info(&dd->pdev->dev,
"Write protect bit is set.\n");
set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
}
if (buf[288] == 0xF7) {
dev_info(&dd->pdev->dev,
"Exceeded Tmax, drive in thermal shutdown.\n");
set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
}
if (buf[288] == 0xBF) {
dev_info(&dd->pdev->dev,
"Drive indicates rebuild has failed.\n");
/* TODO */
}
}
/* get write protect progress */
memset(&attr242, 0, sizeof(struct smart_attr));
if (mtip_get_smart_attr(dd->port, 242, &attr242))
dev_warn(&dd->pdev->dev,
"Unable to check write protect progress\n");
else
dev_info(&dd->pdev->dev,
"Write protect progress: %u%% (%u blocks)\n",
attr242.cur, le32_to_cpu(attr242.data));
return rv;
out3:
del_timer_sync(&dd->port->cmd_timer);
/* Disable interrupts on the HBA. */
writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
dd->mmio + HOST_CTL);
/* Release the IRQ. */
devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
out2:
mtip_deinit_port(dd->port);
/* Free the command/command header memory. */
dmam_free_coherent(&dd->pdev->dev,
HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
dd->port->command_list,
dd->port->command_list_dma);
out1:
/* Free the memory allocated for the port structure. */
kfree(dd->port);
return rv;
}
/*
* Called to deinitialize an interface.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0
*/
static int mtip_hw_exit(struct driver_data *dd)
{
/*
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
if (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags))
if (mtip_standby_immediate(dd->port))
dev_warn(&dd->pdev->dev,
"STANDBY IMMEDIATE failed\n");
/* de-initialize the port. */
mtip_deinit_port(dd->port);
/* Disable interrupts on the HBA. */
writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
dd->mmio + HOST_CTL);
}
del_timer_sync(&dd->port->cmd_timer);
/* Release the IRQ. */
devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
/* Stop the bottom half tasklet. */
tasklet_kill(&dd->tasklet);
/* Free the command/command header memory. */
dmam_free_coherent(&dd->pdev->dev,
HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
dd->port->command_list,
dd->port->command_list_dma);
/* Free the memory allocated for the port structure. */
kfree(dd->port);
return 0;
}
/*
* Issue a Standby Immediate command to the device.
*
* This function is called by the Block Layer just before the
* system powers off during a shutdown.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0
*/
static int mtip_hw_shutdown(struct driver_data *dd)
{
/*
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
mtip_standby_immediate(dd->port);
return 0;
}
/*
* Suspend function
*
* This function is called by the Block Layer just before the
* system hibernates.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0 Suspend was successful
* -EFAULT Suspend was not successful
*/
static int mtip_hw_suspend(struct driver_data *dd)
{
/*
* Send standby immediate (E0h) to the drive
* so that it saves its state.
*/
if (mtip_standby_immediate(dd->port) != 0) {
dev_err(&dd->pdev->dev,
"Failed standby-immediate command\n");
return -EFAULT;
}
/* Disable interrupts on the HBA.*/
writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
dd->mmio + HOST_CTL);
mtip_deinit_port(dd->port);
return 0;
}
/*
* Resume function
*
* This function is called by the Block Layer as the
* system resumes.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0 Resume was successful
* -EFAULT Resume was not successful
*/
static int mtip_hw_resume(struct driver_data *dd)
{
/* Perform any needed hardware setup steps */
hba_setup(dd);
/* Reset the HBA */
if (mtip_hba_reset(dd) != 0) {
dev_err(&dd->pdev->dev,
"Unable to reset the HBA\n");
return -EFAULT;
}
/*
* Enable the port, DMA engine, and FIS reception specific
* h/w in controller.
*/
mtip_init_port(dd->port);
mtip_start_port(dd->port);
/* Enable interrupts on the HBA.*/
writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
dd->mmio + HOST_CTL);
return 0;
}
/*
* Helper function for reusing disk name
* upon hot insertion.
*/
static int rssd_disk_name_format(char *prefix,
int index,
char *buf,
int buflen)
{
const int base = 'z' - 'a' + 1;
char *begin = buf + strlen(prefix);
char *end = buf + buflen;
char *p;
int unit;
p = end - 1;
*p = '\0';
unit = base;
do {
if (p == begin)
return -EINVAL;
*--p = 'a' + (index % unit);
index = (index / unit) - 1;
} while (index >= 0);
memmove(begin, p, end - p);
memcpy(buf, prefix, strlen(prefix));
return 0;
}
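/*
* Example mapping of index to name with the "rssd" prefix: index 0 ->
* "rssda", 25 -> "rssdz", 26 -> "rssdaa"; the same base-26 scheme as
* sd.c, with 'a' acting as both zero and the first digit.
*/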
/*
* Block layer IOCTL handler.
*
* @dev Pointer to the block_device structure.
* @mode ignored
* @cmd IOCTL command passed from the user application.
* @arg Argument passed from the user application.
*
* return value
* 0 IOCTL completed successfully.
* -ENOTTY IOCTL not supported or invalid driver data
* structure pointer.
*/
static int mtip_block_ioctl(struct block_device *dev,
fmode_t mode,
unsigned cmd,
unsigned long arg)
{
struct driver_data *dd = dev->bd_disk->private_data;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!dd)
return -ENOTTY;
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
return -ENOTTY;
switch (cmd) {
case BLKFLSBUF:
return -ENOTTY;
default:
return mtip_hw_ioctl(dd, cmd, arg);
}
}
/*
* Block layer compat IOCTL handler.
*
* @dev Pointer to the block_device structure.
* @mode ignored
* @cmd IOCTL command passed from the user application.
* @arg Argument passed from the user application.
*
* return value
* 0 IOCTL completed successfully.
* -ENOTTY IOCTL not supported or invalid driver data
* structure pointer.
*/
static int mtip_block_compat_ioctl(struct block_device *dev,
fmode_t mode,
unsigned cmd,
unsigned long arg)
{
struct driver_data *dd = dev->bd_disk->private_data;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!dd)
return -ENOTTY;
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
return -ENOTTY;
switch (cmd) {
case BLKFLSBUF:
return -ENOTTY;
case HDIO_DRIVE_TASKFILE: {
struct mtip_compat_ide_task_request_s __user *compat_req_task;
ide_task_request_t req_task;
int compat_tasksize, outtotal, ret;
compat_tasksize =
sizeof(struct mtip_compat_ide_task_request_s);
compat_req_task =
(struct mtip_compat_ide_task_request_s __user *) arg;
if (copy_from_user(&req_task, (void __user *) arg,
compat_tasksize - (2 * sizeof(compat_long_t))))
return -EFAULT;
if (get_user(req_task.out_size, &compat_req_task->out_size))
return -EFAULT;
if (get_user(req_task.in_size, &compat_req_task->in_size))
return -EFAULT;
outtotal = sizeof(struct mtip_compat_ide_task_request_s);
ret = exec_drive_taskfile(dd, (void __user *) arg,
&req_task, outtotal);
if (copy_to_user((void __user *) arg, &req_task,
compat_tasksize -
(2 * sizeof(compat_long_t))))
return -EFAULT;
if (put_user(req_task.out_size, &compat_req_task->out_size))
return -EFAULT;
if (put_user(req_task.in_size, &compat_req_task->in_size))
return -EFAULT;
return ret;
}
default:
return mtip_hw_ioctl(dd, cmd, arg);
}
}
/*
* Obtain the geometry of the device.
*
* You may think that this function is obsolete, but some applications,
* fdisk for example, still use CHS values. This function describes the
* device as having 224 heads and 56 sectors per cylinder. These values are
* chosen so that each cylinder is aligned on a 4KB boundary. Since a
* partition is described in terms of a start and end cylinder this means
* that each partition is also 4KB aligned. Non-aligned partitions adversely
* affect performance.
*
* @dev Pointer to the block_device structure.
* @geo Pointer to a hd_geometry structure.
*
* return value
* 0 Operation completed successfully.
* -ENOTTY An error occurred while reading the drive capacity.
*/
static int mtip_block_getgeo(struct block_device *dev,
struct hd_geometry *geo)
{
struct driver_data *dd = dev->bd_disk->private_data;
sector_t capacity;
if (!dd)
return -ENOTTY;
if (!(mtip_hw_get_capacity(dd, &capacity))) {
dev_warn(&dd->pdev->dev,
"Could not get drive capacity.\n");
return -ENOTTY;
}
geo->heads = 224;
geo->sectors = 56;
sector_div(capacity, (geo->heads * geo->sectors));
geo->cylinders = capacity;
return 0;
}
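/*
* Worked example of the alignment claim above: 224 heads * 56 sectors
* = 12544 sectors per cylinder, i.e. 12544 * 512 bytes = 6,422,528
* bytes, which is exactly 1568 * 4096, so every cylinder (and hence
* every cylinder-aligned partition) starts on a 4KB boundary.
*/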
/*
* Block device operation function.
*
* This structure contains pointers to the functions required by the block
* layer.
*/
static const struct block_device_operations mtip_block_ops = {
.ioctl = mtip_block_ioctl,
.compat_ioctl = mtip_block_compat_ioctl,
.getgeo = mtip_block_getgeo,
.owner = THIS_MODULE
};
/*
* Block layer make request function.
*
* This function is called by the kernel to process a BIO for
* the P320 device.
*
* @queue Pointer to the request queue. Unused other than to obtain
* the driver data structure.
* @bio Pointer to the BIO.
*
*/
static void mtip_make_request(struct request_queue *queue, struct bio *bio)
{
struct driver_data *dd = queue->queuedata;
struct scatterlist *sg;
struct bio_vec *bvec;
int nents = 0;
int tag = 0;
if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag))) {
bio_endio(bio, -ENXIO);
return;
}
if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) {
bio_endio(bio, -ENODATA);
return;
}
if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
&dd->dd_flag) &&
bio_data_dir(bio))) {
bio_endio(bio, -ENODATA);
return;
}
if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))) {
bio_endio(bio, -ENODATA);
return;
}
}
if (unlikely(!bio_has_data(bio))) {
blk_queue_flush(queue, 0);
bio_endio(bio, 0);
return;
}
sg = mtip_hw_get_scatterlist(dd, &tag);
if (likely(sg != NULL)) {
blk_queue_bounce(queue, &bio);
if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
dev_warn(&dd->pdev->dev,
"Maximum number of SGL entries exceeded\n");
bio_io_error(bio);
mtip_hw_release_scatterlist(dd, tag);
return;
}
/* Create the scatter list for this bio. */
bio_for_each_segment(bvec, bio, nents) {
sg_set_page(&sg[nents],
bvec->bv_page,
bvec->bv_len,
bvec->bv_offset);
}
/* Issue the read/write. */
mtip_hw_submit_io(dd,
bio->bi_sector,
bio_sectors(bio),
nents,
tag,
bio_endio,
bio,
bio_data_dir(bio));
} else
bio_io_error(bio);
}
/*
* Block layer initialization function.
*
* This function is called once by the PCI layer for each P320
* device that is connected to the system.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0 on success else an error code.
*/
static int mtip_block_initialize(struct driver_data *dd)
{
int rv = 0, wait_for_rebuild = 0;
sector_t capacity;
unsigned int index = 0;
struct kobject *kobj;
unsigned char thd_name[16];
if (dd->disk)
goto skip_create_disk; /* hw init done, before rebuild */
/* Initialize the protocol layer. */
wait_for_rebuild = mtip_hw_init(dd);
if (wait_for_rebuild < 0) {
dev_err(&dd->pdev->dev,
"Protocol layer initialization failed\n");
rv = -EINVAL;
goto protocol_init_error;
}
dd->disk = alloc_disk(MTIP_MAX_MINORS);
if (dd->disk == NULL) {
dev_err(&dd->pdev->dev,
"Unable to allocate gendisk structure\n");
rv = -EINVAL;
goto alloc_disk_error;
}
/* Generate the disk name, implemented same as in sd.c */
do {
if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL))
goto ida_get_error;
spin_lock(&rssd_index_lock);
rv = ida_get_new(&rssd_index_ida, &index);
spin_unlock(&rssd_index_lock);
} while (rv == -EAGAIN);
if (rv)
goto ida_get_error;
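/*
* ida_pre_get()/ida_get_new() is the older two-step IDA interface:
* ida_pre_get() preallocates memory, and ida_get_new() may still
* return -EAGAIN, which is why the allocation is retried in a loop.
*/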
rv = rssd_disk_name_format("rssd",
index,
dd->disk->disk_name,
DISK_NAME_LEN);
if (rv)
goto disk_index_error;
dd->disk->driverfs_dev = &dd->pdev->dev;
dd->disk->major = dd->major;
dd->disk->first_minor = dd->instance * MTIP_MAX_MINORS;
dd->disk->fops = &mtip_block_ops;
dd->disk->private_data = dd;
dd->index = index;
/*
* if rebuild pending, start the service thread, and delay the block
* queue creation and add_disk()
*/
if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
goto start_service_thread;
skip_create_disk:
/* Allocate the request queue. */
dd->queue = blk_alloc_queue(GFP_KERNEL);
if (dd->queue == NULL) {
dev_err(&dd->pdev->dev,
"Unable to allocate request queue\n");
rv = -ENOMEM;
goto block_queue_alloc_init_error;
}
/* Attach our request function to the request queue. */
blk_queue_make_request(dd->queue, mtip_make_request);
dd->disk->queue = dd->queue;
dd->queue->queuedata = dd;
/* Set device limits. */
set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
blk_queue_physical_block_size(dd->queue, 4096);
blk_queue_max_hw_sectors(dd->queue, 0xffff);
blk_queue_max_segment_size(dd->queue, 0x400000);
blk_queue_io_min(dd->queue, 4096);
/*
* write back cache is not supported in the device. FUA depends on
* write back cache support, hence setting flush support to zero.
*/
blk_queue_flush(dd->queue, 0);
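/*
* blk_queue_flush(q, 0) advertises neither REQ_FLUSH nor REQ_FUA
* support to the block layer for this queue.
*/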
/* Set the capacity of the device in 512 byte sectors. */
if (!(mtip_hw_get_capacity(dd, &capacity))) {
dev_warn(&dd->pdev->dev,
"Could not read drive capacity\n");
rv = -EIO;
goto read_capacity_error;
}
set_capacity(dd->disk, capacity);
/* Enable the block device and add it to /dev */
add_disk(dd->disk);
/*
* Now that the disk is active, initialize any sysfs attributes
* managed by the protocol layer.
*/
kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
if (kobj) {
mtip_hw_sysfs_init(dd, kobj);
kobject_put(kobj);
}
mtip_hw_debugfs_init(dd);
if (dd->mtip_svc_handler) {
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
return rv; /* service thread created for handling rebuild */
}
start_service_thread:
sprintf(thd_name, "mtip_svc_thd_%02d", index);
dd->mtip_svc_handler = kthread_run(mtip_service_thread,
dd, thd_name);
if (IS_ERR(dd->mtip_svc_handler)) {
dev_err(&dd->pdev->dev, "service thread failed to start\n");
dd->mtip_svc_handler = NULL;
rv = -EFAULT;
goto kthread_run_error;
}
if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
rv = wait_for_rebuild;
return rv;
kthread_run_error:
mtip_hw_debugfs_exit(dd);
/* Delete our gendisk. This also removes the device from /dev */
del_gendisk(dd->disk);
read_capacity_error:
blk_cleanup_queue(dd->queue);
block_queue_alloc_init_error:
disk_index_error:
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, index);
spin_unlock(&rssd_index_lock);
ida_get_error:
put_disk(dd->disk);
alloc_disk_error:
mtip_hw_exit(dd); /* De-initialize the protocol layer. */
protocol_init_error:
return rv;
}
/*
* Block layer deinitialization function.
*
* Called by the PCI layer as each P320 device is removed.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0
*/
static int mtip_block_remove(struct driver_data *dd)
{
struct kobject *kobj;
if (dd->mtip_svc_handler) {
set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
wake_up_interruptible(&dd->port->svc_wait);
kthread_stop(dd->mtip_svc_handler);
}
/* Clean up the sysfs attributes, if created */
if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
if (kobj) {
mtip_hw_sysfs_exit(dd, kobj);
kobject_put(kobj);
}
}
mtip_hw_debugfs_exit(dd);
/*
* Delete our gendisk structure. This also removes the device
* from /dev
*/
del_gendisk(dd->disk);
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
spin_unlock(&rssd_index_lock);
blk_cleanup_queue(dd->queue);
dd->disk = NULL;
dd->queue = NULL;
/* De-initialize the protocol layer. */
mtip_hw_exit(dd);
return 0;
}
/*
* Function called by the PCI layer when just before the
* machine shuts down.
*
* If a protocol layer shutdown function is present it will be called
* by this function.
*
* @dd Pointer to the driver data structure.
*
* return value
* 0
*/
static int mtip_block_shutdown(struct driver_data *dd)
{
dev_info(&dd->pdev->dev,
"Shutting down %s ...\n", dd->disk->disk_name);
/* Delete our gendisk structure, and cleanup the blk queue. */
del_gendisk(dd->disk);
spin_lock(&rssd_index_lock);
ida_remove(&rssd_index_ida, dd->index);
spin_unlock(&rssd_index_lock);
blk_cleanup_queue(dd->queue);
dd->disk = NULL;
dd->queue = NULL;
mtip_hw_shutdown(dd);
return 0;
}
static int mtip_block_suspend(struct driver_data *dd)
{
dev_info(&dd->pdev->dev,
"Suspending %s ...\n", dd->disk->disk_name);
mtip_hw_suspend(dd);
return 0;
}
static int mtip_block_resume(struct driver_data *dd)
{
dev_info(&dd->pdev->dev, "Resuming %s ...\n",
dd->disk->disk_name);
mtip_hw_resume(dd);
return 0;
}
/*
* Called for each supported PCI device detected.
*
* This function allocates the private data structure, enables the
* PCI device and then calls the block layer initialization function.
*
* return value
* 0 on success else an error code.
*/
static int mtip_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int rv = 0;
struct driver_data *dd = NULL;
/* Allocate memory for this devices private data. */
dd = kzalloc(sizeof(struct driver_data), GFP_KERNEL);
if (dd == NULL) {
dev_err(&pdev->dev,
"Unable to allocate memory for driver data\n");
return -ENOMEM;
}
/* Attach the private data to this PCI device. */
pci_set_drvdata(pdev, dd);
rv = pcim_enable_device(pdev);
if (rv < 0) {
dev_err(&pdev->dev, "Unable to enable device\n");
goto iomap_err;
}
/* Map BAR5 to memory. */
rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
if (rv < 0) {
dev_err(&pdev->dev, "Unable to map regions\n");
goto iomap_err;
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rv) {
rv = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
if (rv) {
dev_warn(&pdev->dev,
"64-bit DMA enable failed\n");