6 votes

I am working on capturing and streaming audio to an RTMP server at the moment. I work under macOS (in Xcode), so for capturing the audio sample buffer I use the AVFoundation framework. But for encoding and streaming I need to use the ffmpeg API and the libfaac encoder. So the output format must be AAC (to support stream playback on iOS devices).

And I have run into the following problem: the audio capture device (in my case a Logitech camera) gives me a sample buffer with 512 LPCM samples, and I can select an input sample rate of 16000, 24000, 36000 or 48000 Hz. When I feed these 512 samples to the AAC encoder (configured for the appropriate sample rate), I hear slow and jerky audio (it sounds as if there is a piece of silence after each frame).

I figured out (maybe I am wrong) that the libfaac encoder only accepts audio frames with 1024 samples. When I set the input sample rate to 24000 and resample the input buffer to 48000 before encoding, I obtain 1024 resampled samples (512 × 48000/24000 = 1024). After encoding these 1024 samples to AAC, I hear the proper sound on output. But my webcam produces 512 samples per buffer for any input sample rate, while the output sample rate must be 48000 Hz. So I need to resample in any case, and I will not obtain exactly 1024 samples per buffer after resampling.

Is there a way to solve this problem within the ffmpeg API?

I would be grateful for any help.

PS: I guess that I could accumulate resampled buffers until the sample count reaches 1024 and then encode them, but this is a stream, so there would be trouble with the resulting timestamps and with other input devices, and such a solution is not suitable.
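(To illustrate the accumulate idea and the timestamp question: if pts is expressed in a 1/sample_rate time base, it can be derived from a running count of encoded samples instead of from the capture timestamps. A minimal sketch; FRAME_SIZE, fifo_buf, fifo_count, total_sent and encode_1024 are hypothetical names, not from my code:)

#include <string.h>
#include <stdint.h>

#define FRAME_SIZE 1024                 /* samples per AAC frame */
static int16_t fifo_buf[FRAME_SIZE * 4];
static int     fifo_count = 0;          /* samples currently buffered */
static int64_t total_sent = 0;          /* samples already encoded */

void push_samples(const int16_t *src, int n)
{
    memcpy(fifo_buf + fifo_count, src, (size_t)n * sizeof(int16_t));
    fifo_count += n;
    while (fifo_count >= FRAME_SIZE) {
        /* in a 1/sample_rate time base, pts is simply the number of
           samples sent so far */
        encode_1024(fifo_buf, total_sent);  /* hypothetical encode helper */
        total_sent += FRAME_SIZE;
        fifo_count -= FRAME_SIZE;
        memmove(fifo_buf, fifo_buf + FRAME_SIZE, (size_t)fifo_count * sizeof(int16_t));
    }
}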

The current issue grew out of the problem described in this question: How to fill audio AVFrame (ffmpeg) with the data obtained from CMSampleBufferRef (AVFoundation)?

Here is the code with the audio codec configuration (there is also a video stream, but the video works fine):

    /*global variables*/
    static AVFrame *aframe;
    static AVFrame *frame;
    AVOutputFormat *fmt; 
    AVFormatContext *oc; 
    AVStream *audio_st, *video_st;
void Init()
{
    AVCodec *audio_codec, *video_codec;
    int ret;

    avcodec_register_all();  
    av_register_all();
    avformat_network_init();
    avformat_alloc_output_context2(&oc, NULL, "flv", filename);
    fmt = oc->oformat;
    oc->oformat->video_codec = AV_CODEC_ID_H264;
    oc->oformat->audio_codec = AV_CODEC_ID_AAC;
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        // …  /* init video codec */
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_codec = avcodec_find_encoder(fmt->audio_codec);
        if (!audio_codec) {
            fprintf(stderr, "Could not find encoder for '%s'\n",
                    avcodec_get_name(fmt->audio_codec));
            exit(1);
        }
        audio_st = avformat_new_stream(oc, audio_codec);
        if (!audio_st) {
            fprintf(stderr, "Could not allocate stream\n");
            exit(1);
        }
        audio_st->id = oc->nb_streams - 1;

        // AAC:
        audio_st->codec->sample_fmt     = AV_SAMPLE_FMT_S16;
        audio_st->codec->bit_rate       = 32000;
        audio_st->codec->sample_rate    = 48000;
        audio_st->codec->profile        = FF_PROFILE_AAC_LOW;
        audio_st->time_base             = (AVRational){1, audio_st->codec->sample_rate};
        audio_st->codec->channels       = 1;
        audio_st->codec->channel_layout = AV_CH_LAYOUT_MONO;

        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
            audio_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    if (video_st)
    {
    //   …
    /*prepare video*/
    }
    if (audio_st) {
        aframe = avcodec_alloc_frame();
        if (!aframe) {
            fprintf(stderr, "Could not allocate audio frame\n");
            exit(1);
        }
        AVCodecContext *c = audio_st->codec;

        ret = avcodec_open2(c, audio_codec, NULL);
        if (ret < 0) {
            fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
            exit(1);
        }
    }
    //…
}
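(Note: once avcodec_open2() has succeeded, the encoder reports the exact number of samples it expects per frame in AVCodecContext.frame_size — 1024 for AAC-LC. A one-line check, using the variables from the init code above:)

printf("encoder expects %d samples per frame\n", audio_st->codec->frame_size);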

And the resampling and encoding of the audio:

if (mType == kCMMediaType_Audio)
{
    CMSampleTimingInfo timing_info;
    CMSampleBufferGetSampleTimingInfo(sampleBuffer, 0, &timing_info);
    double pts = 0;
    double dts = 0;
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0
    int got_packet, ret;
    av_init_packet(&pkt);
    c = audio_st->codec;
    CMItemCount numSamples = CMSampleBufferGetNumSamples(sampleBuffer);

    NSUInteger channelIndex = 0;

    CMBlockBufferRef audioBlockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
    size_t audioBlockBufferOffset = (channelIndex * numSamples * sizeof(SInt16));
    size_t lengthAtOffset = 0;
    size_t totalLength = 0;
    SInt16 *samples = NULL;
    CMBlockBufferGetDataPointer(audioBlockBuffer, audioBlockBufferOffset, &lengthAtOffset, &totalLength, (char **)(&samples));

    const AudioStreamBasicDescription *audioDescription = CMAudioFormatDescriptionGetStreamBasicDescription(CMSampleBufferGetFormatDescription(sampleBuffer));

    SwrContext *swr = swr_alloc();

    int in_smprt = (int)audioDescription->mSampleRate;
    av_opt_set_int(swr, "in_channel_layout",  AV_CH_LAYOUT_MONO, 0);
    av_opt_set_int(swr, "out_channel_layout", audio_st->codec->channel_layout, 0);
    av_opt_set_int(swr, "in_channel_count",   audioDescription->mChannelsPerFrame, 0);
    av_opt_set_int(swr, "out_channel_count",  audio_st->codec->channels, 0);
    av_opt_set_int(swr, "in_sample_rate",     audioDescription->mSampleRate, 0);
    av_opt_set_int(swr, "out_sample_rate",    audio_st->codec->sample_rate, 0);
    av_opt_set_sample_fmt(swr, "in_sample_fmt",  AV_SAMPLE_FMT_S16, 0);
    av_opt_set_sample_fmt(swr, "out_sample_fmt", audio_st->codec->sample_fmt, 0);

    swr_init(swr);
    uint8_t **input = NULL;
    int src_linesize;
    int in_samples = (int)numSamples;
    /* the captured buffer is interleaved S16, so allocate a matching (S16, not S16P) array */
    ret = av_samples_alloc_array_and_samples(&input, &src_linesize, audioDescription->mChannelsPerFrame,
                                             in_samples, AV_SAMPLE_FMT_S16, 0);

    *input = (uint8_t *)samples;
    uint8_t *output = NULL;


    int out_samples = av_rescale_rnd(swr_get_delay(swr, in_smprt) + in_samples,
                                     audio_st->codec->sample_rate, in_smprt, AV_ROUND_UP);

    av_samples_alloc(&output, NULL, audio_st->codec->channels, out_samples, audio_st->codec->sample_fmt, 0);
    out_samples = swr_convert(swr, &output, out_samples, (const uint8_t **)input, in_samples);


    aframe->nb_samples = out_samples;

    ret = avcodec_fill_audio_frame(aframe, audio_st->codec->channels, audio_st->codec->sample_fmt,
                                   (uint8_t *)output,
                                   out_samples *
                                   av_get_bytes_per_sample(audio_st->codec->sample_fmt) *
                                   audio_st->codec->channels, 1);

    aframe->channel_layout = audio_st->codec->channel_layout;
    aframe->channels       = audio_st->codec->channels;
    aframe->sample_rate    = audio_st->codec->sample_rate;

    if (timing_info.presentationTimeStamp.timescale != 0)
        pts = (double)timing_info.presentationTimeStamp.value / timing_info.presentationTimeStamp.timescale;

    aframe->pts = pts * audio_st->time_base.den;
    aframe->pts = av_rescale_q(aframe->pts, audio_st->time_base, audio_st->codec->time_base);

    ret = avcodec_encode_audio2(c, &pkt, aframe, &got_packet);

    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }
    swr_free(&swr);
    if (got_packet)
    {
        pkt.stream_index = audio_st->index;

        pkt.pts = av_rescale_q(pkt.pts, audio_st->codec->time_base, audio_st->time_base);
        pkt.dts = av_rescale_q(pkt.dts, audio_st->codec->time_base, audio_st->time_base);

        // Write the compressed frame to the media file.
       ret = av_interleaved_write_frame(oc, &pkt);
       if (ret != 0) {
            fprintf(stderr, "Error while writing audio frame: %s\n",
                    av_err2str(ret));
            exit(1);
        }

    }
}
The iPhone supports more than AAC audio; I'm just curious why you're only supporting AAC. Doesn't the Logitech camera support any of the following: G.711 (u-law), ADPCM, MPEG-2 audio, etc.? Most cameras we know of support at least G.711; technically, with some additional APIs, AMR is also possible. — Michelle Cannon
The AAC codec is one of the required codecs in my task, and with other codecs I have not had such problems. — Aleksei2414904
Hi Aleksei2414904, I am encoding PCM samples into AAC on Android and facing the same problem. Please help me if you have found any solution. — Mohit Chauhan

4 Answers

0 votes

I had a similar problem: I was encoding PCM packets to AAC, and the length of the PCM packets was sometimes smaller than 1024 samples.

If I encode a packet that's smaller than 1024 samples, the audio will be slow. On the other hand, if I throw it away, the audio will get faster. From my observation, the swr_convert function does not do any automatic buffering.

I ended up with a buffering scheme: incoming packets fill a 1024-sample buffer, and every time the buffer is full it gets encoded and cleared.

The function that fills the buffer is below:

// put frame data into a buffer of fixed size (requires <algorithm> for std::min)
bool ffmpegHelper::putAudioBuffer(const AVFrame *pAvFrameIn, AVFrame **pAvFrameBuffer, AVCodecContext *dec_ctx, int frame_size, int &k0) {
  // prepare pFrameAudio
  if (!(*pAvFrameBuffer)) {
    if (!(*pAvFrameBuffer = av_frame_alloc())) {
      av_log(NULL, AV_LOG_ERROR, "Alloc frame failed\n");
      return false;
    } else {
      (*pAvFrameBuffer)->format = dec_ctx->sample_fmt;
      (*pAvFrameBuffer)->channels = dec_ctx->channels;
      (*pAvFrameBuffer)->sample_rate = dec_ctx->sample_rate;
      (*pAvFrameBuffer)->nb_samples = frame_size;
      int ret = av_frame_get_buffer(*pAvFrameBuffer, 0);
      if (ret < 0) {
        char err[500];
        av_log(NULL, AV_LOG_ERROR, "get audio buffer failed: %s\n",
          av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, ret));
        return false;
      }
      (*pAvFrameBuffer)->nb_samples = 0;
      (*pAvFrameBuffer)->pts = pAvFrameIn->pts;
    }
  }

  // copy input data to buffer
  int n_channels = pAvFrameIn->channels;
  int new_samples = std::min(pAvFrameIn->nb_samples - k0, frame_size - (*pAvFrameBuffer)->nb_samples);
  int k1 = (*pAvFrameBuffer)->nb_samples;

  if (pAvFrameIn->format == AV_SAMPLE_FMT_S16) {
    int16_t *d_in = (int16_t *)pAvFrameIn->data[0];
    d_in += n_channels * k0;
    int16_t *d_out = (int16_t *)(*pAvFrameBuffer)->data[0];
    d_out += n_channels * k1;

    for (int i = 0; i < new_samples; ++i) {
      for (int j = 0; j < pAvFrameIn->channels; ++j) {
        *d_out++ = *d_in++;
      }
    }
  } else {
    printf("not handled format for audio buffer\n");
    return false;
  }

  (*pAvFrameBuffer)->nb_samples += new_samples;
  k0 += new_samples;

  return true;
}

And the loop that fills the buffer and encodes is below:

// transcoding needed
int got_frame;
AVMediaType stream_type;
// decode the packet (do it yourself)
decodePacket(packet, dec_ctx, &pAvFrame_, got_frame);

if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
    ret = 0;
    // break audio packet down to buffer
    if (enc_ctx->frame_size > 0) {
        int k = 0;
        while (k < pAvFrame_->nb_samples) {
            if (!putAudioBuffer(pAvFrame_, &pFrameAudio_, dec_ctx, enc_ctx->frame_size, k))
                return false;
            if (pFrameAudio_->nb_samples == enc_ctx->frame_size) {
                // the buffer is full, encode it (do it yourself)
                ret = encodeFrame(pFrameAudio_, stream_index, got_frame, false);
                if (ret < 0)
                    return false;
                pFrameAudio_->pts += enc_ctx->frame_size;
                pFrameAudio_->nb_samples = 0;
            }
        }
    } else {
        ret = encodeFrame(pAvFrame_, stream_index, got_frame, false);
    }
} else {
    // encode packet directly
    ret = encodeFrame(pAvFrame_, stream_index, got_frame, false);
}
1 vote

I also ended up here after having a similar problem. I'm reading audio and video from a Blackmagic DeckLink SDI card in 720p50, meaning I had 960 samples per video frame (48k/50fps) that I wanted to encode together with the video. I got really weird audio when sending only 960 samples to aacenc, and it didn't really complain about this fact either.

I started to use AVAudioFifo (see ffmpeg/doc/examples/transcode_aac.c) and kept adding frames to it until I had enough samples to satisfy aacenc. This will mean I have samples playing too late, I guess, since the pts will be set on 1024 samples when the first 960 should really have another value. But it's not really noticeable, as far as I can hear/see.
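Roughly, the FIFO part looks like this (a sketch after the pattern in transcode_aac.c; enc_ctx, converted_samples and nb_converted stand in for whatever your capture/resample step produces):

// minimal AVAudioFifo buffering (libavutil/audio_fifo.h), after
// ffmpeg/doc/examples/transcode_aac.c; enc_ctx, converted_samples and
// nb_converted are assumed names, not from my code
AVAudioFifo *fifo = av_audio_fifo_alloc(enc_ctx->sample_fmt, enc_ctx->channels, 1);

// push whatever the capture produced (e.g. 960 samples per video frame)
av_audio_fifo_write(fifo, (void **)converted_samples, nb_converted);

// pull out exactly frame_size (1024) samples whenever enough are queued
while (av_audio_fifo_size(fifo) >= enc_ctx->frame_size) {
    AVFrame *frame = av_frame_alloc();
    frame->nb_samples     = enc_ctx->frame_size;
    frame->format         = enc_ctx->sample_fmt;
    frame->channel_layout = enc_ctx->channel_layout;
    frame->sample_rate    = enc_ctx->sample_rate;
    av_frame_get_buffer(frame, 0);
    av_audio_fifo_read(fifo, (void **)frame->data, enc_ctx->frame_size);
    // ... set frame->pts from a running sample counter, then encode ...
    av_frame_free(&frame);
}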

0 votes

You have to break the sample buffer into chunks of 1024 samples. I did this for recording MP3 on Android; for more info follow these links: link1, link2.

0 votes

If anyone ends up here: I had the same issue, and just as @Mohit pointed out, for AAC each audio frame has to be broken down into chunks of 1024 samples (for 16-bit mono PCM, that is 2048 bytes).

example:

/* 1024 samples of 16-bit mono PCM = 2048 bytes per AAC frame */
uint8_t *buffer = (uint8_t *) malloc(1024 * sizeof(int16_t));
AVFrame *frame = av_frame_alloc();
frame->nb_samples = 1024;
while (fread(buffer, 1024 * sizeof(int16_t), 1, fp) == 1) {
    frame->data[0] = buffer;
    /* ... encode the frame here ... */
}