/* get the current DMA position with correction on VIA chips */
static unsigned int azx_via_get_position(struct azx *chip,
struct azx_dev *azx_dev)
{
unsigned int link_pos, mini_pos, bound_pos;
unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos;
unsigned int fifo_size;
link_pos = azx_sd_readl(azx_dev, SD_LPIB);
if (azx_dev->substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
/* Playback, no problem using link position */
return link_pos;
}
/* Capture */
/* For new chipsets,
 * use the modulo of the position-buffer value to get the DMA position,
 * just like on old chipsets
 */
mod_dma_pos = le32_to_cpu(*azx_dev->posbuf);
mod_dma_pos %= azx_dev->period_bytes;
/* azx_dev->fifo_size doesn't hold the FIFO size of the input stream;
 * read it from the base address + offset instead.
 */
fifo_size = readw(chip->remap_addr + VIA_IN_STREAM0_FIFO_SIZE_OFFSET);
if (azx_dev->insufficient) {
/* Link position is never greater than FIFO size */
if (link_pos <= fifo_size)
return 0;
azx_dev->insufficient = 0;
}
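/* The capture FIFO holds data that has left the link but not yet
 * reached memory, so back the link position off by the FIFO size,
 * wrapping to the end of the ring buffer when necessary.
 */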
if (link_pos <= fifo_size)
mini_pos = azx_dev->bufsize + link_pos - fifo_size;
else
mini_pos = link_pos - fifo_size;
/* Find the nearest previous boundary */
mod_mini_pos = mini_pos % azx_dev->period_bytes;
mod_link_pos = link_pos % azx_dev->period_bytes;
if (mod_link_pos >= fifo_size)
bound_pos = link_pos - mod_link_pos;
else if (mod_dma_pos >= mod_mini_pos)
bound_pos = mini_pos - mod_mini_pos;
else {
bound_pos = mini_pos - mod_mini_pos + azx_dev->period_bytes;
if (bound_pos >= azx_dev->bufsize)
bound_pos = 0;
}
/* Calculate real DMA position we want */
return bound_pos + mod_dma_pos;
}
static unsigned int azx_get_position(struct azx *chip,
struct azx_dev *azx_dev,
bool with_check)
{
struct snd_pcm_substream *substream = azx_dev->substream;
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
int stream = substream->stream;
struct hda_pcm_stream *hinfo = apcm->hinfo[stream];
unsigned int pos;
int delay = 0;
switch (chip->position_fix[stream]) {
case POS_FIX_LPIB:
/* read LPIB */
pos = azx_sd_readl(azx_dev, SD_LPIB);
break;
case POS_FIX_VIACOMBO:
pos = azx_via_get_position(chip, azx_dev);
break;
default:
/* use the position buffer */
pos = le32_to_cpu(*azx_dev->posbuf);
if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) {
if (!pos || pos == (u32)-1) {
dev_info(chip->card->dev,
"Invalid position buffer, using LPIB read method instead.\n");
chip->position_fix[stream] = POS_FIX_LPIB;
pos = azx_sd_readl(azx_dev, SD_LPIB);
} else
chip->position_fix[stream] = POS_FIX_POSBUF;
}
break;
}
/* calculate runtime delay from LPIB */
if (substream->runtime &&
chip->position_fix[stream] == POS_FIX_POSBUF &&
(chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) {
unsigned int lpib_pos = azx_sd_readl(azx_dev, SD_LPIB);
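/* On playback the position buffer (DMA fetch) runs ahead of the
 * link position (LPIB), so the in-flight amount is pos - lpib_pos;
 * on capture the link runs ahead of the DMA write-back, so it is
 * the other way round.
 */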
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
delay = pos - lpib_pos;
else
delay = lpib_pos - pos;
if (delay < 0) {
if (delay >= azx_dev->delay_negative_threshold)
delay = 0;
else
delay += azx_dev->bufsize;
}
if (delay >= azx_dev->period_bytes) {
dev_info(chip->card->dev,
"Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
delay, azx_dev->period_bytes);
delay = 0;
chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY;
}
delay = bytes_to_frames(substream->runtime, delay);
}
if (substream->runtime) {
if (hinfo->ops.get_delay)
delay += hinfo->ops.get_delay(hinfo, apcm->codec,
substream);
substream->runtime->delay = delay;
}
trace_azx_get_position(chip, azx_dev, pos, delay);
return pos;
}
static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev = get_azx_dev(substream);
return bytes_to_frames(substream->runtime,
azx_get_position(chip, azx_dev, false));
}
/*
* Check whether the current DMA position is acceptable for updating
* periods. Returns non-zero if it's OK.
*
* Many HD-audio controllers appear pretty inaccurate about
* the update-IRQ timing. The IRQ is issued before the data is
* actually processed. So, we need to process it afterwards in a
* workqueue.
*/
static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
{
u32 wallclk;
unsigned int pos;
wallclk = azx_readl(chip, WALLCLK) - azx_dev->start_wallclk;
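/* A period-update IRQ is only plausible once roughly two-thirds of a
 * period's worth of wall-clock ticks have passed since the last
 * handled boundary; anything earlier is treated as bogus.
 */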
if (wallclk < (azx_dev->period_wallclk * 2) / 3)
return -1; /* bogus (too early) interrupt */
pos = azx_get_position(chip, azx_dev, true);
if (WARN_ONCE(!azx_dev->period_bytes,
"hda-intel: zero azx_dev->period_bytes"))
return -1; /* this shouldn't happen! */
if (wallclk < (azx_dev->period_wallclk * 5) / 4 &&
pos % azx_dev->period_bytes > azx_dev->period_bytes / 2)
/* NG - it's below the first next period boundary */
return bdl_pos_adj[chip->dev_index] ? 0 : -1;
azx_dev->start_wallclk += wallclk;
return 1; /* OK, it's fine */
}
/*
* The work for pending PCM period updates.
*/
static void azx_irq_pending_work(struct work_struct *work)
{
struct azx *chip = container_of(work, struct azx, irq_pending_work);
int i, pending, ok;
if (!chip->irq_pending_warned) {
dev_info(chip->card->dev,
"IRQ timing workaround is activated for card #%d. Suggest a bigger bdl_pos_adj.\n",
chip->card->number);
chip->irq_pending_warned = 1;
}
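/* Re-scan all streams until no update is left pending; streams whose
 * position is acceptable get snd_pcm_period_elapsed() called with the
 * register lock dropped.
 */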
for (;;) {
pending = 0;
spin_lock_irq(&chip->reg_lock);
for (i = 0; i < chip->num_streams; i++) {
struct azx_dev *azx_dev = &chip->azx_dev[i];
if (!azx_dev->irq_pending ||
!azx_dev->substream ||
!azx_dev->running)
continue;
ok = azx_position_ok(chip, azx_dev);
if (ok > 0) {
azx_dev->irq_pending = 0;
spin_unlock(&chip->reg_lock);
snd_pcm_period_elapsed(azx_dev->substream);
spin_lock(&chip->reg_lock);
} else if (ok < 0) {
pending = 0; /* too early */
} else
pending++;
}
spin_unlock_irq(&chip->reg_lock);
if (!pending)
return;
msleep(1);
}
}
/* clear irq_pending flags and assure no on-going workq */
static void azx_clear_irq_pending(struct azx *chip)
{
int i;
spin_lock_irq(&chip->reg_lock);
for (i = 0; i < chip->num_streams; i++)
chip->azx_dev[i].irq_pending = 0;
spin_unlock_irq(&chip->reg_lock);
}
#ifdef CONFIG_X86
static int azx_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
if (!azx_snoop(chip))
area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
return snd_pcm_lib_default_mmap(substream, area);
}
#else
#define azx_pcm_mmap NULL
#endif
static struct snd_pcm_ops azx_pcm_ops = {
.open = azx_pcm_open,
.close = azx_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = azx_pcm_hw_params,
.hw_free = azx_pcm_hw_free,
.prepare = azx_pcm_prepare,
.trigger = azx_pcm_trigger,
.pointer = azx_pcm_pointer,
.wall_clock = azx_get_wallclock_tstamp,
.mmap = azx_pcm_mmap,
.page = snd_pcm_sgbuf_ops_page,
};
static void azx_pcm_free(struct snd_pcm *pcm)
{
struct azx_pcm *apcm = pcm->private_data;
if (apcm) {
list_del(&apcm->list);
kfree(apcm);
}
}
#define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
static int
azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
struct hda_pcm *cpcm)
{
struct azx *chip = bus->private_data;
struct snd_pcm *pcm;
struct azx_pcm *apcm;
int pcm_dev = cpcm->device;
unsigned int size;
int s, err;
list_for_each_entry(apcm, &chip->pcm_list, list) {
if (apcm->pcm->device == pcm_dev) {
dev_err(chip->card->dev, "PCM %d already exists\n",
pcm_dev);
return -EBUSY;
}
}
err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
&pcm);
if (err < 0)
return err;
strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
if (apcm == NULL)
return -ENOMEM;
apcm->chip = chip;
apcm->pcm = pcm;
apcm->codec = codec;
pcm->private_data = apcm;
pcm->private_free = azx_pcm_free;
if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
list_add_tail(&apcm->list, &chip->pcm_list);
cpcm->pcm = pcm;
for (s = 0; s < 2; s++) {
apcm->hinfo[s] = &cpcm->stream[s];
if (cpcm->stream[s].substreams)
snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
}
/* buffer pre-allocation */
size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
if (size > MAX_PREALLOC_SIZE)
size = MAX_PREALLOC_SIZE;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
snd_dma_pci_data(chip->pci),
size, MAX_PREALLOC_SIZE);
/* link to codec */
pcm->dev = &codec->dev;
return 0;
}
/*
* mixer creation - all stuff is implemented in hda module
*/
static int azx_mixer_create(struct azx *chip)
{
return snd_hda_build_controls(chip->bus);
}
/*
* initialize SD streams
*/
static int azx_init_stream(struct azx *chip)
{
int i;
/* initialize each stream (aka device)
* assign the starting bdl address to each stream (device)
* and initialize
*/
for (i = 0; i < chip->num_streams; i++) {
struct azx_dev *azx_dev = &chip->azx_dev[i];
azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
/* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
azx_dev->sd_int_sta_mask = 1 << i;
/* stream tag: must be non-zero and unique */
azx_dev->index = i;
azx_dev->stream_tag = i + 1;
}
return 0;
}
static int azx_acquire_irq(struct azx *chip, int do_disconnect)
{
if (request_irq(chip->pci->irq, azx_interrupt,
chip->msi ? 0 : IRQF_SHARED,
KBUILD_MODNAME, chip)) {
dev_err(chip->card->dev,
"unable to grab IRQ %d, disabling device\n",
chip->pci->irq);
if (do_disconnect)
snd_card_disconnect(chip->card);
return -1;
}
chip->irq = chip->pci->irq;
pci_intx(chip->pci, !chip->msi);
return 0;
}
static void azx_stop_chip(struct azx *chip)
{
if (!chip->initialized)
return;
/* disable interrupts */
azx_int_disable(chip);
azx_int_clear(chip);
/* disable CORB/RIRB */
azx_free_cmd_io(chip);
/* disable position buffer */
azx_writel(chip, DPLBASE, 0);
azx_writel(chip, DPUBASE, 0);
chip->initialized = 0;
}
#ifdef CONFIG_SND_HDA_DSP_LOADER
/*
* DSP loading code (e.g. for CA0132)
*/
/* use the first stream for loading DSP */
static struct azx_dev *
azx_get_dsp_loader_dev(struct azx *chip)
{
return &chip->azx_dev[chip->playback_index_offset];
}
static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
unsigned int byte_size,
struct snd_dma_buffer *bufp)
{
u32 *bdl;
struct azx *chip = bus->private_data;
struct azx_dev *azx_dev;
int err;
azx_dev = azx_get_dsp_loader_dev(chip);
dsp_lock(azx_dev);
spin_lock_irq(&chip->reg_lock);
if (azx_dev->running || azx_dev->locked) {
spin_unlock_irq(&chip->reg_lock);
err = -EBUSY;
goto unlock;
}
azx_dev->prepared = 0;
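/* Save the whole stream state so it can be restored after the DSP
 * load finishes, fails, or is cleaned up.
 */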
chip->saved_azx_dev = *azx_dev;
azx_dev->locked = 1;
spin_unlock_irq(&chip->reg_lock);
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG,
snd_dma_pci_data(chip->pci),
byte_size, bufp);
if (err < 0)
goto err_alloc;
mark_pages_wc(chip, bufp, true);
azx_dev->bufsize = byte_size;
azx_dev->period_bytes = byte_size;
azx_dev->format_val = format;
azx_stream_reset(chip, azx_dev);
/* reset BDL address */
azx_sd_writel(azx_dev, SD_BDLPL, 0);
azx_sd_writel(azx_dev, SD_BDLPU, 0);
azx_dev->frags = 0;
bdl = (u32 *)azx_dev->bdl.area;
err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
if (err < 0)
goto error;
azx_setup_controller(chip, azx_dev);
dsp_unlock(azx_dev);
return azx_dev->stream_tag;
error:
mark_pages_wc(chip, bufp, false);
snd_dma_free_pages(bufp);
err_alloc:
spin_lock_irq(&chip->reg_lock);
if (azx_dev->opened)
*azx_dev = chip->saved_azx_dev;
azx_dev->locked = 0;
spin_unlock_irq(&chip->reg_lock);
unlock:
dsp_unlock(azx_dev);
return err;
}
static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
{
struct azx *chip = bus->private_data;
struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
if (start)
azx_stream_start(chip, azx_dev);
else
azx_stream_stop(chip, azx_dev);
azx_dev->running = start;
}
static void azx_load_dsp_cleanup(struct hda_bus *bus,
struct snd_dma_buffer *dmab)
{
struct azx *chip = bus->private_data;
struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
if (!dmab->area || !azx_dev->locked)
return;
dsp_lock(azx_dev);
/* reset BDL address */
azx_sd_writel(azx_dev, SD_BDLPL, 0);
azx_sd_writel(azx_dev, SD_BDLPU, 0);
azx_sd_writel(azx_dev, SD_CTL, 0);
azx_dev->bufsize = 0;
azx_dev->period_bytes = 0;
azx_dev->format_val = 0;
mark_pages_wc(chip, dmab, false);
snd_dma_free_pages(dmab);
dmab->area = NULL;
spin_lock_irq(&chip->reg_lock);
if (azx_dev->opened)
*azx_dev = chip->saved_azx_dev;
azx_dev->locked = 0;
spin_unlock_irq(&chip->reg_lock);
dsp_unlock(azx_dev);
}
#endif /* CONFIG_SND_HDA_DSP_LOADER */
/* power-up/down the controller */
static void azx_power_notify(struct hda_bus *bus, bool power_up)
{
struct azx *chip = bus->private_data;
if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
return;
if (power_up)
pm_runtime_get_sync(&chip->pci->dev);
else
pm_runtime_put_sync(&chip->pci->dev);
}
static DEFINE_MUTEX(card_list_lock);
static LIST_HEAD(card_list);
static void azx_add_card_list(struct azx *chip)
{
mutex_lock(&card_list_lock);
list_add(&chip->list, &card_list);
mutex_unlock(&card_list_lock);
}
static void azx_del_card_list(struct azx *chip)
{
mutex_lock(&card_list_lock);
list_del_init(&chip->list);
mutex_unlock(&card_list_lock);
}
/* trigger power-save check at writing parameter */
static int param_set_xint(const char *val, const struct kernel_param *kp)
{
struct azx *chip;
struct hda_codec *c;
int prev = power_save;
int ret = param_set_int(val, kp);
if (ret || prev == power_save)
return ret;
mutex_lock(&card_list_lock);
list_for_each_entry(chip, &card_list, list) {
if (!chip->bus || chip->disabled)
continue;
list_for_each_entry(c, &chip->bus->codec_list, list)
snd_hda_power_sync(c);
}
mutex_unlock(&card_list_lock);
return 0;
}
#else
#define azx_add_card_list(chip) /* NOP */
#define azx_del_card_list(chip) /* NOP */
#endif /* CONFIG_PM */
#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
/*
* power management
*/
static int azx_suspend(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
struct azx_pcm *p;
if (chip->disabled)
return 0;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
list_for_each_entry(p, &chip->pcm_list, list)
snd_pcm_suspend_all(p->pcm);
if (chip->initialized)
snd_hda_suspend(chip->bus);
if (chip->irq >= 0) {
free_irq(chip->irq, chip);
chip->irq = -1;
}
pci_disable_msi(chip->pci);
pci_disable_device(pci);
pci_save_state(pci);
pci_set_power_state(pci, PCI_D3hot);
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
hda_display_power(false);
return 0;
}
static int azx_resume(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
if (chip->disabled)
return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
hda_display_power(true);
pci_set_power_state(pci, PCI_D0);
pci_restore_state(pci);
if (pci_enable_device(pci) < 0) {
dev_err(chip->card->dev,
"pci_enable_device failed, disabling device\n");
snd_card_disconnect(card);
return -EIO;
}
pci_set_master(pci);
if (chip->msi)
if (pci_enable_msi(pci) < 0)
chip->msi = 0;
if (azx_acquire_irq(chip, 1) < 0)
return -EIO;
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
#endif /* CONFIG_PM_SLEEP || SUPPORT_VGA_SWITCHEROO */
#ifdef CONFIG_PM_RUNTIME
static int azx_runtime_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
if (chip->disabled)
return 0;
if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
return 0;
/* enable controller wake up event */
azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) |
STATESTS_INT_MASK);
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
hda_display_power(false);
return 0;
}
static int azx_runtime_resume(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
struct hda_bus *bus;
struct hda_codec *codec;
int status;
if (chip->disabled)
return 0;
if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
hda_display_power(true);
/* Read STATESTS before controller reset */
status = azx_readw(chip, STATESTS);
azx_init_pci(chip);
azx_init_chip(chip, 1);
bus = chip->bus;
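/* Codecs whose bits are set in STATESTS signaled a state change while
 * the controller was suspended; schedule a jack-poll for each of them
 * now that the controller is back up.
 */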
if (status && bus) {
list_for_each_entry(codec, &bus->codec_list, list)
if (status & (1 << codec->addr))
queue_delayed_work(codec->bus->workq,
&codec->jackpoll_work, codec->jackpoll_interval);
}
/* disable controller Wake Up event */
azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
~STATESTS_INT_MASK);
return 0;
}
static int azx_runtime_idle(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
if (chip->disabled)
return 0;
if (!power_save_controller ||
!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
return -EBUSY;
return 0;
}
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM
static const struct dev_pm_ops azx_pm = {
SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume)
SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle)
};
#define AZX_PM_OPS &azx_pm
#else
#define AZX_PM_OPS NULL
#endif /* CONFIG_PM */
/*
* reboot notifier for hang-up problem at power-down
*/
static int azx_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
struct azx *chip = container_of(nb, struct azx, reboot_notifier);
snd_hda_bus_reboot_notify(chip->bus);
azx_stop_chip(chip);
return NOTIFY_OK;
}
static void azx_notifier_register(struct azx *chip)
{
chip->reboot_notifier.notifier_call = azx_halt;
register_reboot_notifier(&chip->reboot_notifier);
}
static void azx_notifier_unregister(struct azx *chip)
{
if (chip->reboot_notifier.notifier_call)
unregister_reboot_notifier(&chip->reboot_notifier);
}
static int azx_probe_continue(struct azx *chip);
#ifdef SUPPORT_VGA_SWITCHEROO
static struct pci_dev *get_bound_vga(struct pci_dev *pci);
static void azx_vs_set_state(struct pci_dev *pci,
enum vga_switcheroo_state state)
{
struct snd_card *card = pci_get_drvdata(pci);
struct azx *chip = card->private_data;
bool disabled;
wait_for_completion(&chip->probe_wait);
if (chip->init_failed)
return;
disabled = (state == VGA_SWITCHEROO_OFF);
if (chip->disabled == disabled)
return;
if (!chip->bus) {
chip->disabled = disabled;
if (!disabled) {
dev_info(chip->card->dev,
"Start delayed initialization\n");
if (azx_probe_continue(chip) < 0) {
dev_err(chip->card->dev, "initialization error\n");
chip->init_failed = true;
}
}
} else {
dev_info(chip->card->dev, "%s via VGA-switcheroo\n",
disabled ? "Disabling" : "Enabling");
if (disabled) {
pm_runtime_put_sync_suspend(&pci->dev);
azx_suspend(&pci->dev);
/* when we get suspended by vga switcheroo we end up in D3cold,
* however we have no ACPI handle, so pci/acpi can't put us there,
* put ourselves there */
pci->current_state = PCI_D3cold;
chip->disabled = true;
if (snd_hda_lock_devices(chip->bus))
dev_warn(chip->card->dev,
"Cannot lock devices!\n");
} else {
snd_hda_unlock_devices(chip->bus);
pm_runtime_get_noresume(&pci->dev);
chip->disabled = false;
azx_resume(&pci->dev);
}
}
}
static bool azx_vs_can_switch(struct pci_dev *pci)
{
struct snd_card *card = pci_get_drvdata(pci);
struct azx *chip = card->private_data;
wait_for_completion(&chip->probe_wait);
if (chip->init_failed)
return false;
if (chip->disabled || !chip->bus)
return true;
if (snd_hda_lock_devices(chip->bus))
return false;
snd_hda_unlock_devices(chip->bus);
return true;
}
static void init_vga_switcheroo(struct azx *chip)
{
struct pci_dev *p = get_bound_vga(chip->pci);
if (p) {
dev_info(chip->card->dev,
"Handle VGA-switcheroo audio client\n");
chip->use_vga_switcheroo = 1;
pci_dev_put(p);
}
}
static const struct vga_switcheroo_client_ops azx_vs_ops = {
.set_gpu_state = azx_vs_set_state,
.can_switch = azx_vs_can_switch,
};
static int register_vga_switcheroo(struct azx *chip)
{
int err;
if (!chip->use_vga_switcheroo)
return 0;
/* FIXME: currently only handling DIS controller
* is there any machine with two switchable HDMI audio controllers?
*/
err = vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
VGA_SWITCHEROO_DIS,
chip->bus != NULL);
if (err < 0)
return err;
chip->vga_switcheroo_registered = 1;
/* register as an optimus hdmi audio power domain */
vga_switcheroo_init_domain_pm_optimus_hdmi_audio(&chip->pci->dev, &chip->hdmi_pm_domain);
return 0;
}
#else
#define init_vga_switcheroo(chip) /* NOP */
#define register_vga_switcheroo(chip) 0
#define check_hdmi_disabled(pci) false
#endif /* SUPPORT_VGA_SWITCHEROO */
static int azx_free(struct azx *chip)
{
struct pci_dev *pci = chip->pci;
int i;
if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
&& chip->running)
pm_runtime_get_noresume(&pci->dev);
azx_del_card_list(chip);
chip->init_failed = 1; /* to be sure */
if (use_vga_switcheroo(chip)) {
if (chip->disabled && chip->bus)
snd_hda_unlock_devices(chip->bus);
if (chip->vga_switcheroo_registered)
vga_switcheroo_unregister_client(chip->pci);
}
if (chip->initialized) {
azx_clear_irq_pending(chip);
for (i = 0; i < chip->num_streams; i++)
azx_stream_stop(chip, &chip->azx_dev[i]);
azx_stop_chip(chip);
}
if (chip->irq >= 0)
free_irq(chip->irq, (void*)chip);
if (chip->msi)
pci_disable_msi(chip->pci);
if (chip->remap_addr)
iounmap(chip->remap_addr);
if (chip->azx_dev) {
for (i = 0; i < chip->num_streams; i++)
if (chip->azx_dev[i].bdl.area) {
mark_pages_wc(chip, &chip->azx_dev[i].bdl, false);
snd_dma_free_pages(&chip->azx_dev[i].bdl);
}
}
if (chip->rb.area) {
mark_pages_wc(chip, &chip->rb, false);
snd_dma_free_pages(&chip->rb);
}
if (chip->posbuf.area) {
mark_pages_wc(chip, &chip->posbuf, false);
snd_dma_free_pages(&chip->posbuf);
}
if (chip->region_requested)
pci_release_regions(chip->pci);
pci_disable_device(chip->pci);
kfree(chip->azx_dev);
#ifdef CONFIG_SND_HDA_PATCH_LOADER
if (chip->fw)
release_firmware(chip->fw);
#endif
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
hda_display_power(false);
hda_i915_exit();
}
kfree(chip);
return 0;
}
static int azx_dev_free(struct snd_device *device)
{
return azx_free(device->device_data);
}
#ifdef SUPPORT_VGA_SWITCHEROO
/*
* Check whether the HDMI controller is disabled by vga-switcheroo
*/
static struct pci_dev *get_bound_vga(struct pci_dev *pci)
{
struct pci_dev *p;
/* check only discrete GPU */
switch (pci->vendor) {
case PCI_VENDOR_ID_ATI:
case PCI_VENDOR_ID_AMD:
case PCI_VENDOR_ID_NVIDIA:
if (pci->devfn == 1) {
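/* The HDMI audio controller is typically function 1 of the GPU
 * device; look up function 0 in the same slot and check whether it
 * is a VGA-class display controller.
 */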
p = pci_get_domain_bus_and_slot(pci_domain_nr(pci->bus),
pci->bus->number, 0);
if (p) {
if ((p->class >> 8) == PCI_CLASS_DISPLAY_VGA)
return p;
pci_dev_put(p);
}
}
break;
}
return NULL;
}
static bool check_hdmi_disabled(struct pci_dev *pci)
{
bool vga_inactive = false;
struct pci_dev *p = get_bound_vga(pci);
if (p) {
if (vga_switcheroo_get_client_state(p) == VGA_SWITCHEROO_OFF)
vga_inactive = true;
pci_dev_put(p);
}
return vga_inactive;
}
#endif /* SUPPORT_VGA_SWITCHEROO */
/*
* white/black-listing for position_fix
*/
static struct snd_pci_quirk position_fix_list[] = {
SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB),
SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
SND_PCI_QUIRK(0x10de, 0xcb89, "Macbook Pro 7,1", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1849, 0x0888, "775Dual-VSTA", POS_FIX_LPIB),
SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB),