0
votes

developing an Android app that does realtime synthesis. I'm using NDK to generate the waveforms and such and Java to do all the UI. I have the following:

private Thread audioThread;

@Override
protected void onCreate(Bundle savedInstanceState) {
    // Always chain to the framework implementation first; omitting it
    // makes the Activity throw SuperNotCalledException at runtime.
    super.onCreate(savedInstanceState);
    // UI Initializations here
    // Audio Thread creation: spawn the native render loop exactly once.
    if (audioThread == null) {
        audioThread = new Thread() {
            @Override
            public void run() {
                // Raise priority so UI work preempts the audio loop less often.
                setPriority(Thread.MAX_PRIORITY);
                JNIWrapper.runProcess();
            }
        };
        audioThread.start();
    }
}

In my C++ file:

void Java_com_rfoo_runProcess() {
    OPENSL_STREAM *p = android_OpenAudioDevice(SAMPLE_RATE, 0, 2, FRAME_SIZE);
    double outBuffer[FRAME_SIZE];

    while (true) {
         // Audio Processing code happens HERE
         // Write to output buffer here
         android_AudioOut(p, outBuffer, FRAME_SIZE);
    }
    android_CloseAudioDevice(p);
}

Now this would be all groovy if I wasn't doing a lot of signal processing work in my runProcess code. Because there's a lot of work going on, my UI latency is really high and results in clicks when I try to change a parameter for my signal processing code (such as frequency, ADSR envelope, filter cutoff frequency, etc).

What ways exist to reduce this latency? In iOS and PortAudio there are audio callbacks that are routinely called when a time interval/buffer is filled. I tried searching for a similar audio callback that exists in Android and I can't find one. Should I program my own timer to call my processing code?

Thanks!

1
Have you tried looking into using OpenSL ES for your audio playback? It's probably as low latency as you can get, and it also uses the callback mechanism for queuing the next buffer. — WLGfx
@WLGfx that's what I am doing. — yun
Actually, upon closer inspection, I set up my OpenSL ES drivers wrong. Gotta rework it. — yun

1 Answer

2
votes

Yup, so I totally set up my callback wrong... in fact, I didn't set the callback at all.

To rectify this, I followed some tips online and created a processing callback:

// Define the callback: invoked by the OpenSL buffer-queue layer each time a
// buffer of `buffer_frames` frames x `output_channels` interleaved shorts
// must be produced. `context` is the opaque user pointer handed to
// opensl_openDevice().
typedef void (*opensl_callback) (void *context, int buffer_frames, 
                                 int output_channels, short *output_buffer);
// Declare callback: file-scope slot so the JNI entry can install it before
// opening the stream.
static opensl_callback myAudioCallback;
// Define custom callback: the synthesis body is elided here -- `i` and
// `final_sample` are placeholders for a loop over
// buffer_frames * output_channels samples, not compilable as-is.
// NOTE(review): any caller must invoke this with exactly the four arguments
// of the opensl_callback typedef above.
static void audioCallback(void *context, int buffer_frames, 
                          int output_channels, short *output_buffer) {
    // Get my object's data
    AudioData *data = (AudioData *)context;
    // Process here! Then: 
    output_buffer[i] = final_sample;
} 

How I declared/initialized the OpenSL stream:

jboolean Java_com_rfoo_AudioProcessor_runProcess(JNIEnv *, jobject, 
                                                 int srate, int numFrames) {
    myAudioCallback = audioCallback;
    OPENSL_Stream *p = opensl_openDevice(srate, 2, numFrames, myAudioCallback, audioData);
    // Check if successful initialization
    if (!p) return JNI_FALSE;
    // Start our process:
    opensl_startProcess(p);
    return JNI_TRUE;
}

Basically what opensl_openDevice() and opensl_startProcess() do:

/*
 * Allocates and configures an output-only OPENSL_STREAM.
 *
 * sampleRate -- rate in Hz; converted to milliHertz for OpenSL.
 * outChans   -- interleaved output channels; must be nonzero.
 * numFrames  -- frames per callback buffer; OUTPUT_BUFFERS of them are
 *               allocated as a ring.
 * cb         -- render callback; mandatory.
 * data       -- opaque user pointer passed back to cb.
 *
 * Returns a heap-allocated stream (caller frees via opensl_close), or
 * NULL on any failure.
 */
OPENSL_STREAM *opensl_openDevice(int sampleRate, int outChans, int numFrames,
                                 opensl_callback cb, void *data) {
  /* A render callback and at least one output channel are mandatory. */
  if (!cb) {
     return NULL;
  }
  if (outChans == 0) {
     return NULL;
  }

  /* NOTE(review): SLuint32 is unsigned, so the original `srmillihz < 0`
     test was always false and the error path could never fire. Treating 0
     as "unsupported rate" -- confirm convertSampleRate() reports failure
     that way. */
  SLuint32 srmillihz = convertSampleRate(sampleRate);
  if (srmillihz == 0) {
    return NULL;
  }

  /* calloc zero-initializes every field, so unset pointers are NULL. */
  OPENSL_STREAM *p = (OPENSL_STREAM *) calloc(1, sizeof(OPENSL_STREAM));
  if (!p) {
    return NULL;
  }

  p->callback = cb;
  p->data = data;
  p->isRunning = 0;

  p->outputChannels = outChans;
  p->sampleRate = sampleRate;

  /* Three quarters of one buffer period, in milliseconds. */
  p->thresholdMillis = 750.0 * numFrames / sampleRate;

  p->outputBuffer = NULL;
  p->dummyBuffer = NULL;

  p->numFrames = numFrames;
  p->outputBufferFrames = OUTPUT_BUFFERS * numFrames;

  if (openSLCreateEngine(p) != SL_RESULT_SUCCESS) {
     opensl_close(p);
     return NULL;
  }

  if (outChans) {
     int outBufSize = p->outputBufferFrames * outChans;
     if (!(openSLPlayOpen(p, srmillihz) == SL_RESULT_SUCCESS &&
           (p->outputBuffer = (short *) calloc(outBufSize, sizeof(short))))) {
       opensl_close(p);
       return NULL;
     }
  }

  /* The original log referenced `inChans` and `callbackBufferFrames`,
     neither of which exists in this function; log the real values
     (input channels are always 0 for this output-only stream). */
  LOGI("OpenSL_Stream", "Created OPENSL_STREAM(%d, %d, %d, %d)",
       sampleRate, 0, outChans, numFrames);
  LOGI("OpenSL_Stream", "numBuffers: %d", OUTPUT_BUFFERS);
  return p;
}

Start stream code:

/*
 * Starts playback on an opened stream: resets ring/timing state, primes
 * the OpenSL buffer queue, and flips the player to SL_PLAYSTATE_PLAYING.
 *
 * Returns 0 on success (or if already running), -1 on failure.
 */
int opensl_startProcess(OPENSL_STREAM *p) {
    if (!p) {
      return -1;  // Guard: callers may pass a failed opensl_openDevice().
    }
    if (p->isRunning) {
      return 0;  // Already running.
    }

    // Reset ring-buffer positions and output-timing statistics.
    p->outputIndex = 0;
    p->readIndex = -1;

    p->outputTime.tv_sec = 0;
    p->outputTime.tv_nsec = 0;
    p->outputIntervals = 0;
    p->previousOutputIndex = 0;
    p->outputOffset = 0;

    // NOTE(review): this stream is output-only, yet the margin is seeded
    // from inputBufferFrames, a field opensl_openDevice() never sets;
    // outputBufferFrames looks intended -- confirm against the struct.
    p->lowestMargin = p->inputBufferFrames;

    if (p->playerPlay) {
      LOGI("OpenSL_Stream", "Starting player queue.");
      // Prime every buffer in the ring so playback starts with real data.
      int i;
      for (i = 0; i < OUTPUT_BUFFERS; ++i) {
        playerCallback(p->playerBufferQueue, p);
      }
      if ((*p->playerPlay)->SetPlayState(p->playerPlay,
           SL_PLAYSTATE_PLAYING) != SL_RESULT_SUCCESS) {
        opensl_pause(p);  // Roll back to a stopped state on failure.
        return -1;
      }
   }
   p->isRunning = 1;
   return 0;
}

And my audio player callback:

/*
 * Buffer-queue completion callback: renders the next block of audio via the
 * user callback and enqueues it for playback.
 *
 * Fixes vs. the original:
 *  - A stray `}` closed the function before Enqueue(), leaving the last two
 *    statements outside any function (compile error).
 *  - The user callback was invoked with seven arguments, but the
 *    opensl_callback typedef takes four (context, frames, channels, buffer);
 *    the call now matches the typedef.
 *  - The context argument is p->data, the field opensl_openDevice() stores
 *    the user pointer in (there is no p->context).
 *  - The write offset wraps at outputBufferFrames (the whole multi-buffer
 *    ring), not numFrames (one buffer), so successive callbacks no longer
 *    rewrite the buffer OpenSL may still be reading.
 */
static void playerCallback(SLAndroidSimpleBufferQueueItf bq, void *context) {
  OPENSL_STREAM *p = (OPENSL_STREAM *) context;

  /* Frame index -> interleaved-sample offset into the ring. */
  short *currentOutputBuffer = p->outputBuffer +
      (p->outputIndex % p->outputBufferFrames) * p->outputChannels;
  size_t bufferBytes = p->numFrames * p->outputChannels * sizeof(short);

  /* Start from silence so a partial render never emits garbage. */
  memset(currentOutputBuffer, 0, bufferBytes);

  /* Let the client synthesize one buffer of interleaved shorts. */
  p->callback(p->data, p->numFrames, p->outputChannels, currentOutputBuffer);

  (*bq)->Enqueue(bq, currentOutputBuffer, bufferBytes);
  p->outputIndex = nextIndex(p->outputIndex, p->numFrames);
}

When I finish tidying up, I'll link my github opensl_stream code example so other noobs like me can easily find a usable example. Cheers! :)