/*
 * Below are the main callbacks from hda_codec.
*
* They are just the skeleton to call sub-callbacks according to the
* current setting of chip->single_cmd.
*/
/* send a command */
static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
{
	struct azx *chip = bus->private_data;

chip->last_cmd[azx_command_addr(val)] = val;
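	/* dispatch either to the single_cmd (polled immediate command) mode
	 * or to the normal CORB/RIRB DMA mode
	 */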
if (chip->single_cmd)
return azx_single_send_cmd(bus, val);
return azx_corb_send_cmd(bus, val);
}
/* get a response */
static unsigned int azx_get_response(struct hda_bus *bus,
				     unsigned int addr)
{
	struct azx *chip = bus->private_data;
	if (chip->single_cmd)
		return azx_single_get_response(bus, addr);
	return azx_rirb_get_response(bus, addr);
}

#ifdef CONFIG_SND_HDA_POWER_SAVE
static void azx_power_notify(struct hda_bus *bus);
#endif /* CONFIG_SND_HDA_POWER_SAVE */

/* reset codec link */
static int azx_reset(struct azx *chip, int full_reset)
{
	int count;

	if (!full_reset)
		goto __skip;

	/* clear STATESTS */
	azx_writeb(chip, STATESTS, STATESTS_INT_MASK);

	/* reset controller */
	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~ICH6_GCTL_RESET);

	count = 50;
	while (azx_readb(chip, GCTL) && --count)
		msleep(1);

	/* delay for >= 100us for codec PLL to settle per spec
	 * Rev 0.9 section 5.5.1
	 */
	msleep(1);

	/* Bring controller out of reset */
	azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | ICH6_GCTL_RESET);

	count = 50;
	while (!azx_readb(chip, GCTL) && --count)
		msleep(1);

	/* Brent Chartrand said to wait >= 540us for codecs to initialize */
	msleep(1);

 __skip:
	/* check to see if controller is ready */
	if (!azx_readb(chip, GCTL)) {
		snd_printd(SFX "azx_reset: controller not ready!\n");
		return -EBUSY;
	}

	/* Accept unsolicited responses */
	if (!chip->single_cmd)
		azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
			   ICH6_GCTL_UNSOL);

	/* detect codecs */
	if (!chip->codec_mask) {
		chip->codec_mask = azx_readw(chip, STATESTS);
		snd_printdd(SFX "codec_mask = 0x%x\n", chip->codec_mask);
	}

	return 0;
}
/*
* Lowlevel interface
*/
/* enable interrupts */
static void azx_int_enable(struct azx *chip)
{
/* enable controller CIE and GIE */
azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN);
}
/* disable interrupts */
static void azx_int_disable(struct azx *chip)
{
int i;
/* disable interrupts in stream descriptor */
for (i = 0; i < chip->num_streams; i++) {
struct azx_dev *azx_dev = &chip->azx_dev[i];
azx_sd_writeb(azx_dev, SD_CTL,
azx_sd_readb(azx_dev, SD_CTL) & ~SD_INT_MASK);
}
/* disable SIE for all streams */
azx_writeb(chip, INTCTL, 0);
/* disable controller CIE and GIE */
azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
~(ICH6_INT_CTRL_EN | ICH6_INT_GLOBAL_EN));
}
/* clear interrupts */
static void azx_int_clear(struct azx *chip)
{
	int i;

	/* clear stream status */
	for (i = 0; i < chip->num_streams; i++) {
struct azx_dev *azx_dev = &chip->azx_dev[i];
azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK);
}
/* clear STATESTS */
azx_writeb(chip, STATESTS, STATESTS_INT_MASK);
/* clear rirb status */
azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
/* clear int status */
azx_writel(chip, INTSTS, ICH6_INT_CTRL_EN | ICH6_INT_ALL_STREAM);
}
/* start a stream */
static void azx_stream_start(struct azx *chip, struct azx_dev *azx_dev)
{
/*
* Before stream start, initialize parameter
*/
azx_dev->insufficient = 1;
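	/* enable SIE */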
azx_writel(chip, INTCTL,
azx_readl(chip, INTCTL) | (1 << azx_dev->index));
/* set DMA start and interrupt mask */
azx_sd_writeb(azx_dev, SD_CTL, azx_sd_readb(azx_dev, SD_CTL) |
SD_CTL_DMA_START | SD_INT_MASK);
}
/* stop DMA */
static void azx_stream_clear(struct azx *chip, struct azx_dev *azx_dev)
{
azx_sd_writeb(azx_dev, SD_CTL, azx_sd_readb(azx_dev, SD_CTL) &
~(SD_CTL_DMA_START | SD_INT_MASK));
azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
}
/* stop a stream */
static void azx_stream_stop(struct azx *chip, struct azx_dev *azx_dev)
{
azx_stream_clear(chip, azx_dev);
azx_writel(chip, INTCTL,
azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
}

/*
 * reset and start the controller registers
 */
static void azx_init_chip(struct azx *chip, int full_reset)
{
if (chip->initialized)
return;
azx_reset(chip, full_reset);
/* initialize interrupts */
azx_int_clear(chip);
azx_int_enable(chip);
/* initialize the codec command I/O */
if (!chip->single_cmd)
azx_init_cmd_io(chip);
/* program the position buffer */
azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
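	/* DPLBASE/DPUBASE point to the DMA position buffer, into which the
	 * controller writes the current read position of every stream
	 */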
chip->initialized = 1;
}
/*
* initialize the PCI registers
*/
/* update bits in a PCI register byte */
static void update_pci_byte(struct pci_dev *pci, unsigned int reg,
unsigned char mask, unsigned char val)
{
unsigned char data;
pci_read_config_byte(pci, reg, &data);
data &= ~mask;
data |= (val & mask);
pci_write_config_byte(pci, reg, data);
}
static void azx_init_pci(struct azx *chip)
{
/* Clear bits 0-2 of PCI register TCSEL (at offset 0x44)
* TCSEL == Traffic Class Select Register, which sets PCI express QOS
* Ensuring these bits are 0 clears playback static on some HD Audio
* codecs.
	 * The PCI register TCSEL is defined in the Intel manuals.
	 */
	if (!(chip->driver_caps & AZX_DCAPS_NO_TCSEL)) {
		snd_printdd(SFX "Clearing TCSEL\n");
		update_pci_byte(chip->pci, ICH6_PCIREG_TCSEL, 0x07, 0);
	}

/* For ATI SB450/600/700/800/900 and AMD Hudson azalia HD audio,
* we need to enable snoop.
*/
if (chip->driver_caps & AZX_DCAPS_ATI_SNOOP) {
snd_printdd(SFX "Setting ATI snoop: %d\n", azx_snoop(chip));
ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR, 0x07,
azx_snoop(chip) ? ATI_SB450_HDAUDIO_ENABLE_SNOOP : 0);
}
/* For NVIDIA HDA, enable snoop */
if (chip->driver_caps & AZX_DCAPS_NVIDIA_SNOOP) {
snd_printdd(SFX "Setting Nvidia snoop: %d\n", azx_snoop(chip));
update_pci_byte(chip->pci,
NVIDIA_HDA_TRANSREG_ADDR,
0x0f, NVIDIA_HDA_ENABLE_COHBITS);
update_pci_byte(chip->pci,
NVIDIA_HDA_ISTRM_COH,
0x01, NVIDIA_HDA_ENABLE_COHBIT);
update_pci_byte(chip->pci,
NVIDIA_HDA_OSTRM_COH,
0x01, NVIDIA_HDA_ENABLE_COHBIT);
}
/* Enable SCH/PCH snoop if needed */
	if (chip->driver_caps & AZX_DCAPS_SCH_SNOOP) {
		unsigned short snoop;

		pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
if ((!azx_snoop(chip) && !(snoop & INTEL_SCH_HDA_DEVC_NOSNOOP)) ||
(azx_snoop(chip) && (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP))) {
snoop &= ~INTEL_SCH_HDA_DEVC_NOSNOOP;
if (!azx_snoop(chip))
snoop |= INTEL_SCH_HDA_DEVC_NOSNOOP;
pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC, snoop);
pci_read_config_word(chip->pci,
INTEL_SCH_HDA_DEVC, &snoop);
}
snd_printdd(SFX "SCH snoop: %s\n",
(snoop & INTEL_SCH_HDA_DEVC_NOSNOOP)
? "Disabled" : "Enabled");
static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev);
/*
 * interrupt handler
 */
static irqreturn_t azx_interrupt(int irq, void *dev_id)
{
	struct azx *chip = dev_id;
	struct azx_dev *azx_dev;
	u32 status;
	u8 sd_status;
	int i, ok;

	spin_lock(&chip->reg_lock);

	if (chip->disabled) {
		spin_unlock(&chip->reg_lock);
		return IRQ_NONE;
	}

status = azx_readl(chip, INTSTS);
if (status == 0) {
spin_unlock(&chip->reg_lock);
return IRQ_NONE;
}
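	/* walk all streams and handle the buffer-complete interrupts */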
for (i = 0; i < chip->num_streams; i++) {
azx_dev = &chip->azx_dev[i];
if (status & azx_dev->sd_int_sta_mask) {
			sd_status = azx_sd_readb(azx_dev, SD_STS);
			azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK);
if (!azx_dev->substream || !azx_dev->running ||
!(sd_status & SD_INT_COMPLETE))
continue;
/* check whether this IRQ is really acceptable */
ok = azx_position_ok(chip, azx_dev);
if (ok == 1) {
spin_unlock(&chip->reg_lock);
snd_pcm_period_elapsed(azx_dev->substream);
spin_lock(&chip->reg_lock);
} else if (ok == 0 && chip->bus && chip->bus->workq) {
/* bogus IRQ, process it later */
azx_dev->irq_pending = 1;
queue_work(chip->bus->workq,
&chip->irq_pending_work);
}
}
}
/* clear rirb int */
status = azx_readb(chip, RIRBSTS);
if (status & RIRB_INT_MASK) {
if (status & RIRB_INT_RESPONSE) {
			if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
				udelay(80);
			azx_update_rirb(chip);
		}
		azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
	}
#if 0
/* clear state status int */
if (azx_readb(chip, STATESTS) & 0x04)
azx_writeb(chip, STATESTS, 0x04);
#endif
spin_unlock(&chip->reg_lock);
return IRQ_HANDLED;
}
static int setup_bdle(struct azx *chip,
struct snd_pcm_substream *substream,
struct azx_dev *azx_dev, u32 **bdlp,
int ofs, int size, int with_ioc)
{
u32 *bdl = *bdlp;
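	/* each BDL entry consists of four 32-bit little-endian words:
	 * buffer address low/high, buffer length, and the IOC flag that
	 * triggers an interrupt when the entry has been consumed
	 */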
while (size > 0) {
dma_addr_t addr;
int chunk;
if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
return -EINVAL;
addr = snd_pcm_sgbuf_get_addr(substream, ofs);
/* program the address field of the BDL entry */
bdl[0] = cpu_to_le32((u32)addr);
bdl[1] = cpu_to_le32(upper_32_bits(addr));
/* program the size field of the BDL entry */
chunk = snd_pcm_sgbuf_get_chunk_size(substream, ofs, size);
/* one BDLE cannot cross 4K boundary on CTHDA chips */
if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
u32 remain = 0x1000 - (ofs & 0xfff);
if (chunk > remain)
chunk = remain;
}
bdl[2] = cpu_to_le32(chunk);
/* program the IOC to enable interrupt
* only when the whole fragment is processed
*/
size -= chunk;
bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
bdl += 4;
azx_dev->frags++;
ofs += chunk;
}
*bdlp = bdl;
return ofs;
}
/* set up the BDL entries for the given stream */
static int azx_setup_periods(struct azx *chip,
			     struct snd_pcm_substream *substream,
			     struct azx_dev *azx_dev)
{
	u32 *bdl;
	int i, ofs, periods, period_bytes;
	int pos_adj;

	/* reset BDL address */
	azx_sd_writel(azx_dev, SD_BDLPL, 0);
	azx_sd_writel(azx_dev, SD_BDLPU, 0);

	period_bytes = azx_dev->period_bytes;
	periods = azx_dev->bufsize / period_bytes;

	/* program the initial BDL entries */
	bdl = (u32 *)azx_dev->bdl.area;
	ofs = 0;
	azx_dev->frags = 0;
	/* bdl_pos_adj carves out an extra small fragment at the start of the
	 * buffer so that period interrupts fire slightly after the period
	 * boundary, working around inaccurate DMA position reporting
	 */
	pos_adj = bdl_pos_adj[chip->dev_index];
	if (pos_adj > 0) {
		struct snd_pcm_runtime *runtime = substream->runtime;
		int pos_align = pos_adj;
		pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
		if (!pos_adj)
			pos_adj = pos_align;
		else
			pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
				pos_align;
		pos_adj = frames_to_bytes(runtime, pos_adj);
		if (pos_adj >= period_bytes) {
			snd_printk(KERN_WARNING SFX "Too big adjustment %d\n",
				   bdl_pos_adj[chip->dev_index]);
			pos_adj = 0;
		} else {
			ofs = setup_bdle(chip, substream, azx_dev,
					 &bdl, ofs, pos_adj,
					 !substream->runtime->no_period_wakeup);
			if (ofs < 0)
				goto error;
		}
	} else
		pos_adj = 0;
	for (i = 0; i < periods; i++) {
		if (i == periods - 1 && pos_adj)
			ofs = setup_bdle(chip, substream, azx_dev, &bdl, ofs,
					 period_bytes - pos_adj, 0);
		else
			ofs = setup_bdle(chip, substream, azx_dev, &bdl, ofs,
					 period_bytes,
					 !substream->runtime->no_period_wakeup);
		if (ofs < 0)
			goto error;
	}
	return 0;

 error:
	snd_printk(KERN_ERR SFX "Too many BDL entries: buffer=%d, period=%d\n",
		   azx_dev->bufsize, period_bytes);
	return -EINVAL;
}

/* reset stream */
static void azx_stream_reset(struct azx *chip, struct azx_dev *azx_dev)
{
	unsigned char val;
	int timeout;

	azx_stream_clear(chip, azx_dev);
azx_sd_writeb(azx_dev, SD_CTL, azx_sd_readb(azx_dev, SD_CTL) |
SD_CTL_STREAM_RESET);
udelay(3);
timeout = 300;
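	/* wait until the hardware reports that the stream entered reset */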
while (!((val = azx_sd_readb(azx_dev, SD_CTL)) & SD_CTL_STREAM_RESET) &&
--timeout)
;
val &= ~SD_CTL_STREAM_RESET;
azx_sd_writeb(azx_dev, SD_CTL, val);
udelay(3);
timeout = 300;
/* waiting for hardware to report that the stream is out of reset */
while (((val = azx_sd_readb(azx_dev, SD_CTL)) & SD_CTL_STREAM_RESET) &&
--timeout)
;
/* reset first position - may not be synced with hw at this time */
	*azx_dev->posbuf = 0;
}

/*
* set up the SD for streaming
*/
static int azx_setup_controller(struct azx *chip, struct azx_dev *azx_dev)
{
	unsigned int val;

/* make sure the run bit is zero for SD */
azx_stream_clear(chip, azx_dev);
val = azx_sd_readl(azx_dev, SD_CTL);
val = (val & ~SD_CTL_STREAM_TAG_MASK) |
(azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
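	/* non-snooped (uncached) buffers get the traffic priority bit set */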
if (!azx_snoop(chip))
val |= SD_CTL_TRAFFIC_PRIO;
azx_sd_writel(azx_dev, SD_CTL, val);
/* program the length of samples in cyclic buffer */
azx_sd_writel(azx_dev, SD_CBL, azx_dev->bufsize);
/* program the stream format */
	/* this value needs to match the stream format programmed into the codec */
azx_sd_writew(azx_dev, SD_FORMAT, azx_dev->format_val);
/* program the stream LVI (last valid index) of the BDL */
azx_sd_writew(azx_dev, SD_LVI, azx_dev->frags - 1);
/* program the BDL address */
/* lower BDL address */
azx_sd_writel(azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
azx_sd_writel(azx_dev, SD_BDLPU, upper_32_bits(azx_dev->bdl.addr));
/* enable the position buffer */
if (chip->position_fix[0] != POS_FIX_LPIB ||
chip->position_fix[1] != POS_FIX_LPIB) {
if (!(azx_readl(chip, DPLBASE) & ICH6_DPLBASE_ENABLE))
azx_writel(chip, DPLBASE,
(u32)chip->posbuf.addr | ICH6_DPLBASE_ENABLE);
}
/* set the interrupt enable bits in the descriptor control register */
azx_sd_writel(azx_dev, SD_CTL,
		      azx_sd_readl(azx_dev, SD_CTL) | SD_INT_MASK);

	return 0;
}

/*
* Probe the given codec address
*/
static int probe_codec(struct azx *chip, int addr)
{
unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
unsigned int res;
mutex_lock(&chip->bus->cmd_mutex);
chip->probing = 1;
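	/* read the vendor-ID parameter of the root node; getting no response
	 * means that no codec is present at this address
	 */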
azx_send_cmd(chip->bus, cmd);
res = azx_get_response(chip->bus, addr);
	chip->probing = 0;
	mutex_unlock(&chip->bus->cmd_mutex);
if (res == -1)
return -EIO;
snd_printdd(SFX "codec #%d probed OK\n", addr);
static int azx_attach_pcm_stream(struct hda_bus *bus, struct hda_codec *codec,
struct hda_pcm *cpcm);
static void azx_stop_chip(struct azx *chip);
static void azx_bus_reset(struct hda_bus *bus)
{
struct azx *chip = bus->private_data;
bus->in_reset = 1;
azx_stop_chip(chip);
azx_init_chip(chip, 1);
if (chip->initialized) {
struct azx_pcm *p;
list_for_each_entry(p, &chip->pcm_list, list)
snd_pcm_suspend_all(p->pcm);
snd_hda_suspend(chip->bus);
snd_hda_resume(chip->bus);
}
bus->in_reset = 0;
}
/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] DELAYED_INITDATA_MARK = {
	[AZX_DRIVER_NVIDIA] = 8,
};

static int DELAYED_INIT_MARK azx_codec_create(struct azx *chip, const char *model)
{
	struct hda_bus_template bus_temp;
	int c, codecs, err;
	int max_slots;

memset(&bus_temp, 0, sizeof(bus_temp));
bus_temp.private_data = chip;
bus_temp.modelname = model;
bus_temp.pci = chip->pci;
bus_temp.ops.command = azx_send_cmd;
bus_temp.ops.get_response = azx_get_response;
bus_temp.ops.attach_pcm = azx_attach_pcm_stream;
bus_temp.ops.bus_reset = azx_bus_reset;
#ifdef CONFIG_SND_HDA_POWER_SAVE
bus_temp.power_save = &power_save;
bus_temp.ops.pm_notify = azx_power_notify;
#endif
err = snd_hda_bus_new(chip->card, &bus_temp, &chip->bus);
	if (err < 0)
		return err;

	if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
		snd_printd(SFX "Enable delay in RIRB handling\n");
		chip->bus->needs_damn_long_delay = 1;
	}

	codecs = 0;
	max_slots = azx_max_codecs[chip->driver_type];
if (!max_slots)
max_slots = AZX_DEFAULT_CODECS;
/* First try to probe all given codec slots */
for (c = 0; c < max_slots; c++) {
if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
if (probe_codec(chip, c) < 0) {
/* Some BIOSen give you wrong codec addresses
* that don't exist
*/
snd_printk(KERN_WARNING SFX
"Codec #%d probe error; "
"disabling it...\n", c);
chip->codec_mask &= ~(1 << c);
				/* Worse, accessing a non-existent codec
				 * often screws up the controller chip and
				 * disturbs further communication. So if an
				 * error occurs during probing, it is better
				 * to reset the controller chip to get back
				 * to a sane state.
				 */
azx_stop_chip(chip);
				azx_init_chip(chip, 1);
			}
		}
	}

/* AMD chipsets often cause the communication stalls upon certain
* sequence like the pin-detection. It seems that forcing the synced
* access works around the stall. Grrr...
*/
if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
snd_printd(SFX "Enable sync_write for stable communication\n");
chip->bus->sync_write = 1;
chip->bus->allow_bus_reset = 1;
}
/* Then create codec instances */
for (c = 0; c < max_slots; c++) {
if ((chip->codec_mask & (1 << c)) & chip->codec_probe_mask) {
struct hda_codec *codec;
			err = snd_hda_codec_new(chip->bus, c, &codec);
			if (err < 0)
				continue;
			codec->beep_mode = chip->beep_mode;
			codecs++;
}
}
if (!codecs) {
snd_printk(KERN_ERR SFX "no codecs initialized\n");
return -ENXIO;
}
return 0;
}
/* configure each codec instance */
static int __devinit azx_codec_configure(struct azx *chip)
{
struct hda_codec *codec;
list_for_each_entry(codec, &chip->bus->codec_list, list) {
snd_hda_codec_configure(codec);
}
return 0;
}
/*
* PCM support
*/
/* assign a stream for the PCM */
static inline struct azx_dev *
azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
{
	int dev, i, nums;
	struct azx_dev *res = NULL;
	/* make a non-zero unique key for the substream */
	int key = (substream->pcm->device << 16) | (substream->number << 2) |
		(substream->stream + 1);

if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
dev = chip->playback_index_offset;
nums = chip->playback_streams;
} else {
dev = chip->capture_index_offset;
nums = chip->capture_streams;
}
	for (i = 0; i < nums; i++, dev++)
		if (!chip->azx_dev[dev].opened) {
			res = &chip->azx_dev[dev];
			if (res->assigned_key == key)
				break;
		}
	if (res) {
		res->opened = 1;
		res->assigned_key = key;
	}
	return res;
}

/* release the assigned stream */
static inline void azx_release_device(struct azx_dev *azx_dev)
{
	azx_dev->opened = 0;
}

static struct snd_pcm_hardware azx_pcm_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
/* No full-resume yet implemented */
/* SNDRV_PCM_INFO_RESUME |*/
SNDRV_PCM_INFO_SYNC_START |
SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
.rate_max = 48000,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = AZX_MAX_BUF_SIZE,
.period_bytes_min = 128,
.period_bytes_max = AZX_MAX_BUF_SIZE / 2,
.periods_min = 2,
.periods_max = AZX_MAX_FRAG,
.fifo_size = 0,
};
static int azx_pcm_open(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev;
struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long flags;
	int err;
	int buff_step;

	mutex_lock(&chip->open_mutex);
	azx_dev = azx_assign_device(chip, substream);
	if (azx_dev == NULL) {
		mutex_unlock(&chip->open_mutex);
		return -EBUSY;
	}
runtime->hw = azx_pcm_hw;
runtime->hw.channels_min = hinfo->channels_min;
runtime->hw.channels_max = hinfo->channels_max;
runtime->hw.formats = hinfo->formats;
runtime->hw.rates = hinfo->rates;
snd_pcm_limit_hw_rates(runtime);
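	/* snd_pcm_limit_hw_rates() derives rate_min/rate_max from the
	 * rates bitmap just copied from the codec
	 */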
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
if (chip->align_buffer_size)
/* constrain buffer sizes to be multiple of 128
bytes. This is more efficient in terms of memory
access but isn't required by the HDA spec and
prevents users from specifying exact period/buffer
sizes. For example for 44.1kHz, a period size set
to 20ms will be rounded to 19.59ms. */
buff_step = 128;
else
/* Don't enforce steps on buffer sizes, still need to
be multiple of 4 bytes (HDA spec). Tested on Intel
HDA controllers, may not work on all devices where
option needs to be disabled */
buff_step = 4;
snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
buff_step);
snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
buff_step);
snd_hda_power_up_d3wait(apcm->codec);
err = hinfo->ops.open(hinfo, apcm->codec, substream);
	if (err < 0) {
		azx_release_device(azx_dev);
		snd_hda_power_down(apcm->codec);
		mutex_unlock(&chip->open_mutex);
		return err;
	}
	snd_pcm_limit_hw_rates(runtime);
/* sanity check */
if (snd_BUG_ON(!runtime->hw.channels_min) ||
snd_BUG_ON(!runtime->hw.channels_max) ||
snd_BUG_ON(!runtime->hw.formats) ||
snd_BUG_ON(!runtime->hw.rates)) {
azx_release_device(azx_dev);
hinfo->ops.close(hinfo, apcm->codec, substream);
snd_hda_power_down(apcm->codec);
mutex_unlock(&chip->open_mutex);
return -EINVAL;
}
spin_lock_irqsave(&chip->reg_lock, flags);
azx_dev->substream = substream;
azx_dev->running = 0;
spin_unlock_irqrestore(&chip->reg_lock, flags);
	runtime->private_data = azx_dev;
	snd_pcm_set_sync(substream);
	mutex_unlock(&chip->open_mutex);
	return 0;
}

static int azx_pcm_close(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	unsigned long flags;

	mutex_lock(&chip->open_mutex);
	spin_lock_irqsave(&chip->reg_lock, flags);
azx_dev->substream = NULL;
azx_dev->running = 0;
spin_unlock_irqrestore(&chip->reg_lock, flags);
azx_release_device(azx_dev);
hinfo->ops.close(hinfo, apcm->codec, substream);
	snd_hda_power_down(apcm->codec);
	mutex_unlock(&chip->open_mutex);
	return 0;
}

static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *hw_params)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct azx_dev *azx_dev = get_azx_dev(substream);
	int ret;

	mark_runtime_wc(chip, azx_dev, runtime, false);
	azx_dev->bufsize = 0;
	azx_dev->period_bytes = 0;
	azx_dev->format_val = 0;
	ret = snd_pcm_lib_malloc_pages(substream,
				       params_buffer_bytes(hw_params));
	if (ret < 0)
		return ret;
	mark_runtime_wc(chip, azx_dev, runtime, true);
	return ret;
}

static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx_dev *azx_dev = get_azx_dev(substream);
struct azx *chip = apcm->chip;
struct snd_pcm_runtime *runtime = substream->runtime;
struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
/* reset BDL address */
azx_sd_writel(azx_dev, SD_BDLPL, 0);
azx_sd_writel(azx_dev, SD_BDLPU, 0);
azx_sd_writel(azx_dev, SD_CTL, 0);
azx_dev->bufsize = 0;
azx_dev->period_bytes = 0;
azx_dev->format_val = 0;
snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
mark_runtime_wc(chip, azx_dev, runtime, false);
return snd_pcm_lib_free_pages(substream);
}
static int azx_pcm_prepare(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
struct azx_dev *azx_dev = get_azx_dev(substream);
struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int bufsize, period_bytes, format_val, stream_tag;
	int err;
struct hda_spdif_out *spdif =
snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
unsigned short ctls = spdif ? spdif->ctls : 0;
azx_stream_reset(chip, azx_dev);
format_val = snd_hda_calc_stream_format(runtime->rate,
runtime->channels,
runtime->format,
						hinfo->maxbps,
						ctls);
	if (!format_val) {
		snd_printk(KERN_ERR SFX
"invalid format_val, rate=%d, ch=%d, format=%d\n",
runtime->rate, runtime->channels, runtime->format);
return -EINVAL;
}
bufsize = snd_pcm_lib_buffer_bytes(substream);
period_bytes = snd_pcm_lib_period_bytes(substream);
snd_printdd(SFX "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
bufsize, format_val);
if (bufsize != azx_dev->bufsize ||
period_bytes != azx_dev->period_bytes ||
format_val != azx_dev->format_val) {
azx_dev->bufsize = bufsize;
azx_dev->period_bytes = period_bytes;
azx_dev->format_val = format_val;
err = azx_setup_periods(chip, substream, azx_dev);
if (err < 0)
return err;
}
/* wallclk has 24Mhz clock source */
azx_dev->period_wallclk = (((runtime->period_size * 24000) /
runtime->rate) * 1000);
azx_setup_controller(chip, azx_dev);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
azx_dev->fifo_size = azx_sd_readw(azx_dev, SD_FIFOSIZE) + 1;
else
azx_dev->fifo_size = 0;
stream_tag = azx_dev->stream_tag;
/* CA-IBG chips need the playback stream starting from 1 */
if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
stream_tag > chip->capture_streams)
stream_tag -= chip->capture_streams;
return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
				     azx_dev->format_val, substream);
}

static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
	struct azx *chip = apcm->chip;
	struct azx_dev *azx_dev;
	struct snd_pcm_substream *s;
	int rstart = 0, start, nsync = 0, sbits = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		rstart = 1;
		/* fall through */
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		start = 1;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
		start = 0;
		break;
	default:
		return -EINVAL;
	}
snd_pcm_group_for_each_entry(s, substream) {
if (s->pcm->card != substream->pcm->card)
continue;
azx_dev = get_azx_dev(s);
sbits |= 1 << azx_dev->index;
nsync++;
snd_pcm_trigger_done(s, substream);
}
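	/* with the register lock held, first set the SYNC bits of the linked
	 * streams, then toggle their DMA start bits
	 */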
spin_lock(&chip->reg_lock);
if (nsync > 1) {
/* first, set SYNC bits of corresponding streams */
if (chip->driver_caps & AZX_DCAPS_OLD_SSYNC)
azx_writel(chip, OLD_SSYNC,
azx_readl(chip, OLD_SSYNC) | sbits);
else
azx_writel(chip, SSYNC, azx_readl(chip, SSYNC) | sbits);
}
snd_pcm_group_for_each_entry(s, substream) {
if (s->pcm->card != substream->pcm->card)
continue;
azx_dev = get_azx_dev(s);
if (start) {
azx_dev->start_wallclk = azx_readl(chip, WALLCLK);
if (!rstart)
azx_dev->start_wallclk -=
azx_dev->period_wallclk;
} else {