static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev = get_azx_dev(substream);
int ret;
mark_runtime_wc(chip, azx_dev, substream, false);
azx_dev->bufsize = 0;
azx_dev->period_bytes = 0;
azx_dev->format_val = 0;
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
if (ret < 0)
return ret;
mark_runtime_wc(chip, azx_dev, substream, true);
return ret;
}
static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev = get_azx_dev(substream);
struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
/* reset BDL address */
azx_sd_writel(azx_dev, SD_BDLPL, 0);
azx_sd_writel(azx_dev, SD_BDLPU, 0);
azx_sd_writel(azx_dev, SD_CTL, 0);
azx_dev->bufsize = 0;
azx_dev->period_bytes = 0;
azx_dev->format_val = 0;
snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
mark_runtime_wc(chip, azx_dev, substream, false);
return snd_pcm_lib_free_pages(substream);
}
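/* prepare callback: program the stream format, the BDL and the controller
 * registers for this substream, then let the codec side set up its
 * converter via snd_hda_codec_prepare().
 */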
static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev = get_azx_dev(substream);
struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int bufsize, period_bytes, format_val, stream_tag;
int err;
struct hda_spdif_out *spdif =
snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
unsigned short ctls = spdif ? spdif->ctls : 0;
azx_stream_reset(chip, azx_dev);
format_val = snd_hda_calc_stream_format(runtime->rate,
runtime->channels,
runtime->format,
hinfo->maxbps,
ctls);
if (!format_val) {
snd_printk(KERN_ERR SFX
"%s: invalid format_val, rate=%d, ch=%d, format=%d\n",
pci_name(chip->pci),
runtime->rate, runtime->channels, runtime->format);
return -EINVAL;
}
bufsize = snd_pcm_lib_buffer_bytes(substream);
period_bytes = snd_pcm_lib_period_bytes(substream);
snd_printdd(SFX "%s: azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
pci_name(chip->pci), bufsize, format_val);
if (bufsize != azx_dev->bufsize ||
period_bytes != azx_dev->period_bytes ||
format_val != azx_dev->format_val ||
runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
azx_dev->bufsize = bufsize;
azx_dev->period_bytes = period_bytes;
azx_dev->format_val = format_val;
azx_dev->no_period_wakeup = runtime->no_period_wakeup;
err = azx_setup_periods(chip, substream, azx_dev);
if (err < 0)
return err;
}
/* wallclk has a 24MHz clock source */
azx_dev->period_wallclk = (((runtime->period_size * 24000) /
runtime->rate) * 1000);
azx_setup_controller(chip, azx_dev);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
azx_dev->fifo_size = azx_sd_readw(azx_dev, SD_FIFOSIZE) + 1;
else
azx_dev->fifo_size = 0;
stream_tag = azx_dev->stream_tag;
/* CA-IBG chips need the playback stream starting from 1 */
if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
stream_tag > chip->capture_streams)
stream_tag -= chip->capture_streams;
return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
azx_dev->format_val, substream);
}
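/* trigger callback: start or stop all substreams linked to this one as a
 * group.  The SSYNC bits are set first so the hardware releases the
 * streams together, and the wall clock is sampled for later position
 * checks.
 */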
static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev;
struct snd_pcm_substream *s;
int rstart = 0, start, nsync = 0, sbits = 0;
int nwait, timeout;
azx_dev = get_azx_dev(substream);
trace_azx_pcm_trigger(chip, azx_dev, cmd);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
rstart = 1;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
start = 1;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
start = 0;
break;
default:
return -EINVAL;
}
snd_pcm_group_for_each_entry(s, substream) {
if (s->pcm->card != substream->pcm->card)
continue;
azx_dev = get_azx_dev(s);
sbits |= 1 << azx_dev->index;
nsync++;
snd_pcm_trigger_done(s, substream);
}
spin_lock(&chip->reg_lock);
/* first, set SYNC bits of corresponding streams */
if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
azx_writel(chip, OLD_SSYNC,
azx_readl(chip, OLD_SSYNC) | sbits);
else
azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
snd_pcm_group_for_each_entry(s, substream) {
if (s->pcm->card != substream->pcm->card)
continue;
azx_dev = get_azx_dev(s);
if (start) {
azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
if (!rstart)
azx_dev->start_wallclk -=
azx_dev->period_wallclk;
azx_stream_start(chip, azx_dev);
} else {
azx_stream_stop(chip, azx_dev);
}
azx_dev->running = start;
}
spin_unlock(&chip->reg_lock);
if (start) {
/* wait until all FIFOs get ready */
for (timeout = 5000; timeout; timeout--) {
nwait = 0;
snd_pcm_group_for_each_entry(s, substream) {
if (s->pcm->card != substream->pcm->card)
continue;
azx_dev = get_azx_dev(s);
if (!(azx_sd_readb(azx_dev, SD_STS) &
SD_STS_FIFO_READY))
nwait++;
}
if (!nwait)
break;
cpu_relax();
}
} else {
/* wait until all RUN bits are cleared */
for (timeout = 5000; timeout; timeout--) {
nwait = 0;
snd_pcm_group_for_each_entry(s, substream) {
if (s->pcm->card != substream->pcm->card)
continue;
azx_dev = get_azx_dev(s);
if (azx_sd_readb(azx_dev, SD_CTL) &
SD_CTL_DMA_START)
nwait++;
}
if (!nwait)
break;
cpu_relax();
}
}
spin_lock(&chip->reg_lock);
/* reset SYNC bits */
if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
azx_writel(chip, OLD_SSYNC,
azx_readl(chip, OLD_SSYNC) & ~sbits);
else
azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) & ~sbits);
if (start) {
azx_timecounter_init(substream, 0, 0);
if (nsync > 1) {
cycle_t cycle_last;
/* same start cycle for master and group */
azx_dev = get_azx_dev(substream);
cycle_last = azx_dev->azx_tc.cycle_last;
snd_pcm_group_for_each_entry(s, substream) {
if (s->pcm->card != substream->pcm->card)
continue;
azx_timecounter_init(s, 1, cycle_last);
}
}
}
spin_unlock(&chip->reg_lock);
return 0;
}
/* get the current DMA position with correction on VIA chips */
static unsigned int azx_via_get_position(struct azx *chip,
struct azx_dev *azx_dev)
{
unsigned int link_pos, mini_pos, bound_pos;
unsigned int mod_link_pos, mod_dma_pos, mod_mini_pos;
unsigned int fifo_size;
link_pos = azx_sd_readl(azx_dev, SD_LPIB);
if (azx_dev->substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
/* Playback, no problem using link position */
return link_pos;
}
/* Capture */
/* For the new chipsets,
* use mod to get the DMA position just like the old chipsets
*/
mod_dma_pos = le32_to_cpu(*azx_dev->posbuf);
mod_dma_pos %= azx_dev->period_bytes;
/* azx_dev->fifo_size doesn't hold the FIFO size of the input stream;
* read it from base address + offset instead.
*/
fifo_size = readw(chip->remap_addr + VIA_IN_STREAM0_FIFO_SIZE_OFFSET);
if (azx_dev->insufficient) {
/* Link position never exceeds the FIFO size */
if (link_pos <= fifo_size)
return 0;
azx_dev->insufficient = 0;
}
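/* For capture the link position runs ahead of the data actually in
 * memory by the FIFO contents: back it off by the FIFO size (wrapping
 * around the ring buffer), find the period boundary it falls in, and
 * add the position-buffer remainder on top.
 */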
if (link_pos <= fifo_size)
mini_pos = azx_dev->bufsize + link_pos - fifo_size;
else
mini_pos = link_pos - fifo_size;
/* Find nearest previous boundary */
mod_mini_pos = mini_pos % azx_dev->period_bytes;
mod_link_pos = link_pos % azx_dev->period_bytes;
if (mod_link_pos >= fifo_size)
bound_pos = link_pos - mod_link_pos;
else if (mod_dma_pos >= mod_mini_pos)
bound_pos = mini_pos - mod_mini_pos;
else {
bound_pos = mini_pos - mod_mini_pos + azx_dev->period_bytes;
if (bound_pos >= azx_dev->bufsize)
bound_pos = 0;
}
/* Calculate real DMA position we want */
return bound_pos + mod_dma_pos;
}
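/* read the current DMA position, using LPIB, the VIA workaround or the
 * position buffer depending on position_fix; optionally falls back from an
 * invalid position buffer to LPIB, and derives the runtime delay from the
 * LPIB/posbuf difference where supported.
 */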
static unsigned int azx_get_position(struct azx *chip,
struct azx_dev *azx_dev,
bool with_check)
{
unsigned int pos;
int stream = azx_dev->substream->stream;
int delay = 0;
switch (chip->position_fix[stream]) {
case POS_FIX_LPIB:
/* read LPIB */
pos = azx_sd_readl(azx_dev, SD_LPIB);
break;
case POS_FIX_VIACOMBO:
pos = azx_via_get_position(chip, azx_dev);
break;
default:
/* use the position buffer */
pos = le32_to_cpu(*azx_dev->posbuf);
if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) {
if (!pos || pos == (u32)-1) {
printk(KERN_WARNING
"hda-intel: Invalid position buffer, "
"using LPIB read method instead.\n");
chip->position_fix[stream] = POS_FIX_LPIB;
pos = azx_sd_readl(azx_dev, SD_LPIB);
} else
chip->position_fix[stream] = POS_FIX_POSBUF;
}
break;
}
/* calculate runtime delay from LPIB */
if (azx_dev->substream->runtime &&
chip->position_fix[stream] == POS_FIX_POSBUF &&
(chip->driver_caps & AZX_DCAPS_COUNT_LPIB_DELAY)) {
unsigned int lpib_pos = azx_sd_readl(azx_dev, SD_LPIB);
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
delay = pos - lpib_pos;
else
delay = lpib_pos - pos;
if (delay < 0)
delay += azx_dev->bufsize;
if (delay >= azx_dev->period_bytes) {
snd_printk(KERN_WARNING SFX
"%s: Unstable LPIB (%d >= %d); "
"disabling LPIB delay counting\n",
pci_name(chip->pci), delay, azx_dev->period_bytes);
delay = 0;
chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY;
}
azx_dev->substream->runtime->delay =
bytes_to_frames(azx_dev->substream->runtime, delay);
}
trace_azx_get_position(chip, azx_dev, pos, delay);
return pos;
}
static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev = get_azx_dev(substream);
return bytes_to_frames(substream->runtime,
azx_get_position(chip, azx_dev, false));
}
/*
* Check whether the current DMA position is acceptable for updating
* periods. Returns non-zero if it's OK.
*
* Many HD-audio controllers appear pretty inaccurate about
* the update-IRQ timing.  The IRQ is issued before the data is
* actually processed, so we need to process it afterwards in a
* workqueue.
*/
static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
{
u32 wallclk;
unsigned int pos;
wallclk = azx_readl(chip, WALLCLK) - azx_dev->start_wallclk;
if (wallclk < (azx_dev->period_wallclk * 2) / 3)
return -1; /* bogus (too early) interrupt */
pos = azx_get_position(chip, azx_dev, true);
if (WARN_ONCE(!azx_dev->period_bytes,
"hda-intel: zero azx_dev->period_bytes"))
return -1; /* this shouldn't happen! */
if (wallclk < (azx_dev->period_wallclk * 5) / 4 &&
pos % azx_dev->period_bytes > azx_dev->period_bytes / 2)
/* NG - it's below the first next period boundary */
return bdl_pos_adj[chip->dev_index] ? 0 : -1;
azx_dev->start_wallclk += wallclk;
return 1; /* OK, it's fine */
}
/*
* The work for pending PCM period updates.
*/
static void azx_irq_pending_work(struct work_struct *work)
{
struct azx *chip = container_of(work, struct azx, irq_pending_work);
int i, pending, ok;
if (!chip->irq_pending_warned) {
printk(KERN_WARNING
"hda-intel: IRQ timing workaround is activated "
"for card #%d. Suggest a bigger bdl_pos_adj.\n",
chip->card->number);
chip->irq_pending_warned = 1;
}
for (;;) {
pending = 0;
spin_lock_irq(&chip->reg_lock);
for (i = 0; i < chip->num_streams; i++) {
struct azx_dev *azx_dev = &chip->azx_dev[i];
if (!azx_dev->irq_pending ||
!azx_dev->substream ||
!azx_dev->running)
continue;
ok = azx_position_ok(chip, azx_dev);
if (ok > 0) {
azx_dev->irq_pending = 0;
spin_unlock(&chip->reg_lock);
snd_pcm_period_elapsed(azx_dev->substream);
spin_lock(&chip->reg_lock);
} else if (ok < 0) {
pending = 0; /* too early */
} else
pending++;
}
spin_unlock_irq(&chip->reg_lock);
if (!pending)
return;
}
}
/* clear irq_pending flags and assure no on-going workq */
static void azx_clear_irq_pending(struct azx *chip)
{
int i;
spin_lock_irq(&chip->reg_lock);
for (i = 0; i < chip->num_streams; i++)
chip->azx_dev[i].irq_pending = 0;
spin_unlock_irq(&chip->reg_lock);
}
#ifdef CONFIG_X86
static int azx_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
if (!azx_snoop(chip))
area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
return snd_pcm_lib_default_mmap(substream, area);
}
#else
#define azx_pcm_mmap NULL
#endif
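/* ALSA PCM callbacks shared by all azx streams */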
static struct snd_pcm_ops azx_pcm_ops = {
.open = azx_pcm_open,
.close = azx_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = azx_pcm_hw_params,
.hw_free = azx_pcm_hw_free,
.prepare = azx_pcm_prepare,
.trigger = azx_pcm_trigger,
.pointer = azx_pcm_pointer,
.wall_clock = azx_get_wallclock_tstamp,
.mmap = azx_pcm_mmap,
.page = snd_pcm_sgbuf_ops_page,
};
static void azx_pcm_free(struct snd_pcm *pcm)
{
struct azx_pcm *apcm = pcm->private_data;
if (apcm) {
list_del(&apcm->list);
kfree(apcm);
}
}
#define MAX_PREALLOC_SIZE (32 * 1024 * 1024)
static int
azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
struct hda_pcm *cpcm)
{
struct azx *chip = bus->private_data;
struct snd_pcm *pcm;
struct azx_pcm *apcm;
int pcm_dev = cpcm->device;
unsigned int size;
int s, err;
list_for_each_entry(apcm, &chip->pcm_list, list) {
if (apcm->pcm->device == pcm_dev) {
snd_printk(KERN_ERR SFX "%s: PCM %d already exists\n",
pci_name(chip->pci), pcm_dev);
return -EBUSY;
}
}
err = snd_pcm_new(chip->card, cpcm->name, pcm_dev,
cpcm->stream[SNDRV_PCM_STREAM_PLAYBACK].substreams,
cpcm->stream[SNDRV_PCM_STREAM_CAPTURE].substreams,
&pcm);
if (err < 0)
return err;
strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
if (apcm == NULL)
return -ENOMEM;
apcm->chip = chip;
apcm->pcm = pcm;
apcm->codec = codec;
pcm->private_data = apcm;
pcm->private_free = azx_pcm_free;
if (cpcm->pcm_type == HDA_PCM_TYPE_MODEM)
pcm->dev_class = SNDRV_PCM_CLASS_MODEM;
list_add_tail(&apcm->list, &chip->pcm_list);
cpcm->pcm = pcm;
for (s = 0; s < 2; s++) {
apcm->hinfo[s] = &cpcm->stream[s];
if (cpcm->stream[s].substreams)
snd_pcm_set_ops(pcm, s, &azx_pcm_ops);
}
/* buffer pre-allocation */
size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
if (size > MAX_PREALLOC_SIZE)
size = MAX_PREALLOC_SIZE;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
snd_dma_pci_data(chip->pci),
size, MAX_PREALLOC_SIZE);
return 0;
}
/*
* mixer creation - all stuff is implemented in hda module
*/
static int azx_mixer_create(struct azx *chip)
{
return snd_hda_build_controls(chip->bus);
}
/*
* initialize SD streams
*/
static int azx_init_stream(struct azx *chip)
{
int i;
/* initialize each stream (aka device)
* assign the starting bdl address to each stream (device)
* and initialize
*/
for (i = 0; i < chip->num_streams; i++) {
struct azx_dev *azx_dev = &chip->azx_dev[i];
azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
/* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
azx_dev->sd_int_sta_mask = 1 << i;
/* stream tag: must be non-zero and unique */
azx_dev->index = i;
azx_dev->stream_tag = i + 1;
}
return 0;
}
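/* request the PCI interrupt line; it is exclusive when MSI is enabled and
 * shared otherwise.  On failure the card is optionally disconnected.
 */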
static int azx_acquire_irq(struct azx *chip, int do_disconnect)
{
if (request_irq(chip->pci->irq, azx_interrupt,
chip->msi ? 0 : IRQF_SHARED,
KBUILD_MODNAME, chip)) {
printk(KERN_ERR "hda-intel: unable to grab IRQ %d, "
"disabling device\n", chip->pci->irq);
if (do_disconnect)
snd_card_disconnect(chip->card);
return -1;
}
chip->irq = chip->pci->irq;
pci_intx(chip->pci, !chip->msi);
return 0;
}
static void azx_stop_chip(struct azx *chip)
{
if (!chip->initialized)
return;
/* disable interrupts */
azx_int_disable(chip);
azx_int_clear(chip);
/* disable CORB/RIRB */
azx_free_cmd_io(chip);
/* disable position buffer */
azx_writel(chip, DPLBASE, 0);
azx_writel(chip, DPUBASE, 0);
chip->initialized = 0;
}
#ifdef CONFIG_SND_HDA_DSP_LOADER
/*
* DSP loading code (e.g. for CA0132)
*/
/* use the first stream for loading DSP */
static struct azx_dev *
azx_get_dsp_loader_dev(struct azx *chip)
{
return &chip->azx_dev[chip->playback_index_offset];
}
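/* allocate a DMA buffer for the DSP firmware image, point the loader
 * stream's BDL at it and return the stream tag to be used by the codec
 * driver for the download.
 */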
static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
unsigned int byte_size,
struct snd_dma_buffer *bufp)
{
u32 *bdl;
struct azx *chip = bus->private_data;
struct azx_dev *azx_dev;
int err;
if (snd_hda_lock_devices(bus))
return -EBUSY;
err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG,
snd_dma_pci_data(chip->pci),
byte_size, bufp);
if (err < 0)
goto unlock;
mark_pages_wc(chip, bufp, true);
azx_dev = azx_get_dsp_loader_dev(chip);
azx_dev->bufsize = byte_size;
azx_dev->period_bytes = byte_size;
azx_dev->format_val = format;
azx_stream_reset(chip, azx_dev);
/* reset BDL address */
azx_sd_writel(azx_dev, SD_BDLPL, 0);
azx_sd_writel(azx_dev, SD_BDLPU, 0);
azx_dev->frags = 0;
bdl = (u32 *)azx_dev->bdl.area;
err = setup_bdle(chip, bufp, azx_dev, &bdl, 0, byte_size, 0);
if (err < 0)
goto error;
azx_setup_controller(chip, azx_dev);
return azx_dev->stream_tag;
error:
mark_pages_wc(chip, bufp, false);
snd_dma_free_pages(bufp);
unlock:
snd_hda_unlock_devices(bus);
return err;
}
static void azx_load_dsp_trigger(struct hda_bus *bus, bool start)
{
struct azx *chip = bus->private_data;
struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
if (start)
azx_stream_start(chip, azx_dev);
else
azx_stream_stop(chip, azx_dev);
azx_dev->running = start;
}
static void azx_load_dsp_cleanup(struct hda_bus *bus,
struct snd_dma_buffer *dmab)
{
struct azx *chip = bus->private_data;
struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
if (!dmab->area)
return;
/* reset BDL address */
azx_sd_writel(azx_dev, SD_BDLPL, 0);
azx_sd_writel(azx_dev, SD_BDLPU, 0);
azx_sd_writel(azx_dev, SD_CTL, 0);
azx_dev->bufsize = 0;
azx_dev->period_bytes = 0;
azx_dev->format_val = 0;
mark_pages_wc(chip, dmab, false);
snd_dma_free_pages(dmab);
dmab->area = NULL;
snd_hda_unlock_devices(bus);
}
#endif /* CONFIG_SND_HDA_DSP_LOADER */
#ifdef CONFIG_PM
/* power-up/down the controller */
static void azx_power_notify(struct hda_bus *bus, bool power_up)
{
struct azx *chip = bus->private_data;
if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
return;
if (power_up)
pm_runtime_get_sync(&chip->pci->dev);
else
pm_runtime_put_sync(&chip->pci->dev);
}
static DEFINE_MUTEX(card_list_lock);
static LIST_HEAD(card_list);
static void azx_add_card_list(struct azx *chip)
{
mutex_lock(&card_list_lock);
list_add(&chip->list, &card_list);
mutex_unlock(&card_list_lock);
}
static void azx_del_card_list(struct azx *chip)
{
mutex_lock(&card_list_lock);
list_del_init(&chip->list);
mutex_unlock(&card_list_lock);
}
/* trigger power-save check at writing parameter */
static int param_set_xint(const char *val, const struct kernel_param *kp)
{
struct azx *chip;
struct hda_codec *c;
int prev = power_save;
int ret = param_set_int(val, kp);
if (ret || prev == power_save)
return ret;
mutex_lock(&card_list_lock);
list_for_each_entry(chip, &card_list, list) {
if (!chip->bus || chip->disabled)
continue;
list_for_each_entry(c, &chip->bus->codec_list, list)
snd_hda_power_sync(c);
}
mutex_unlock(&card_list_lock);
return 0;
}
#else
#define azx_add_card_list(chip) /* NOP */
#define azx_del_card_list(chip) /* NOP */
#endif /* CONFIG_PM */
#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
/*
* power management
*/
static int azx_suspend(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
struct azx_pcm *p;
if (chip->disabled)
return 0;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
list_for_each_entry(p, &chip->pcm_list, list)
snd_pcm_suspend_all(p->pcm);
if (chip->initialized)
snd_hda_suspend(chip->bus);
if (chip->irq >= 0) {
free_irq(chip->irq, chip);
chip->irq = -1;
}
pci_disable_msi(chip->pci);
pci_disable_device(pci);
pci_save_state(pci);
pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int azx_resume(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
if (chip->disabled)
return 0;
pci_set_power_state(pci, PCI_D0);
pci_restore_state(pci);
if (pci_enable_device(pci) < 0) {
printk(KERN_ERR "hda-intel: pci_enable_device failed, "
"disabling device\n");
snd_card_disconnect(card);
return -EIO;
}
pci_set_master(pci);
if (chip->msi)
if (pci_enable_msi(pci) < 0)
chip->msi = 0;
if (azx_acquire_irq(chip, 1) < 0)
return -EIO;
azx_init_pci(chip);
azx_init_chip(chip, 1);
snd_hda_resume(chip->bus);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
#endif /* CONFIG_PM_SLEEP || SUPPORT_VGA_SWITCHEROO */
#ifdef CONFIG_PM_RUNTIME
static int azx_runtime_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
azx_stop_chip(chip);
azx_clear_irq_pending(chip);
return 0;
}
static int azx_runtime_resume(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
azx_init_pci(chip);
azx_init_chip(chip, 1);
return 0;
}
static int azx_runtime_idle(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
if (power_save_controller > 0)
return 0;
if (!power_save_controller ||
!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
return -EBUSY;
return 0;
}
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM
static const struct dev_pm_ops azx_pm = {
SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume)
SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle)
};
#define AZX_PM_OPS &azx_pm
#else
#define AZX_PM_OPS NULL
#endif /* CONFIG_PM */
/*
* reboot notifier for hang-up problem at power-down
*/
static int azx_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
struct azx *chip = container_of(nb, struct azx, reboot_notifier);
snd_hda_bus_reboot_notify(chip->bus);
azx_stop_chip(chip);
return NOTIFY_OK;
}
static void azx_notifier_register(struct azx *chip)
{
chip->reboot_notifier.notifier_call = azx_halt;
register_reboot_notifier(&chip->reboot_notifier);
}
static void azx_notifier_unregister(struct azx *chip)
{
if (chip->reboot_notifier.notifier_call)
unregister_reboot_notifier(&chip->reboot_notifier);
}
static int azx_first_init(struct azx *chip);
static int azx_probe_continue(struct azx *chip);
#ifdef SUPPORT_VGA_SWITCHEROO
static struct pci_dev *get_bound_vga(struct pci_dev *pci);
static void azx_vs_set_state(struct pci_dev *pci,
enum vga_switcheroo_state state)
{
struct snd_card *card = pci_get_drvdata(pci);
struct azx *chip = card->private_data;
bool disabled;
wait_for_completion(&chip->probe_wait);
if (chip->init_failed)
return;
disabled = (state == VGA_SWITCHEROO_OFF);
if (chip->disabled == disabled)
return;
if (!chip->bus) {
chip->disabled = disabled;
if (!disabled) {
snd_printk(KERN_INFO SFX
"%s: Start delayed initialization\n",
pci_name(chip->pci));
if (azx_first_init(chip) < 0 ||
azx_probe_continue(chip) < 0) {
snd_printk(KERN_ERR SFX
"%s: initialization error\n",
pci_name(chip->pci));
chip->init_failed = true;
}
}
} else {
snd_printk(KERN_INFO SFX
"%s: %s via VGA-switcheroo\n", pci_name(chip->pci),
disabled ? "Disabling" : "Enabling");
if (disabled) {
azx_suspend(&chip->pci->dev);
chip->disabled = true;
if (snd_hda_lock_devices(chip->bus))
snd_printk(KERN_WARNING SFX "%s: Cannot lock devices!\n",
pci_name(chip->pci));
} else {
snd_hda_unlock_devices(chip->bus);
chip->disabled = false;
azx_resume(&chip->pci->dev);
}
}
}
static bool azx_vs_can_switch(struct pci_dev *pci)
{
struct snd_card *card = pci_get_drvdata(pci);
struct azx *chip = card->private_data;
wait_for_completion(&chip->probe_wait);
if (chip->init_failed)
return false;
if (chip->disabled || !chip->bus)
return true;
if (snd_hda_lock_devices(chip->bus))
return false;
snd_hda_unlock_devices(chip->bus);
return true;
}
static void init_vga_switcheroo(struct azx *chip)
{
struct pci_dev *p = get_bound_vga(chip->pci);
if (p) {
snd_printk(KERN_INFO SFX
"%s: Handle VGA-switcheroo audio client\n",
pci_name(chip->pci));
chip->use_vga_switcheroo = 1;
pci_dev_put(p);
}
}
static const struct vga_switcheroo_client_ops azx_vs_ops = {
.set_gpu_state = azx_vs_set_state,
.can_switch = azx_vs_can_switch,
};
static int register_vga_switcheroo(struct azx *chip)
{
int err;
if (!chip->use_vga_switcheroo)
return 0;
/* FIXME: currently only handling DIS controller
* is there any machine with two switchable HDMI audio controllers?
*/
err = vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
VGA_SWITCHEROO_DIS,
chip->bus != NULL);
if (err < 0)
return err;
chip->vga_switcheroo_registered = 1;
return 0;
}
#else
#define init_vga_switcheroo(chip) /* NOP */
#define register_vga_switcheroo(chip) 0