Identifying dominant frequency in recorded sound in Android

I'm trying to convert the Python program located at
https://github.com/rraval/pied-piper/blob/master/decode.py
to an Android Java file.



The first step is to identify the dominant frequency. I have written the following Java program to do that:



    private class RecordAudio extends AsyncTask<Void, Void, Void> {

        @Override
        protected Void doInBackground(Void... paramVarArgs) {
            int audioSource = AudioSource.MIC;
            int sampleRateInHz = 44100;
            int channelConfig = AudioFormat.CHANNEL_IN_MONO;
            int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
            int bufferSizeInBytes = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
            byte[] Data = new byte[bufferSizeInBytes];

            AudioRecord audioRecorder = new AudioRecord(audioSource,
                    sampleRateInHz,
                    channelConfig,
                    audioFormat,
                    bufferSizeInBytes);
            audioRecorder.startRecording();

            boolean isRecording = true;
            while (isRecording) {
                audioRecorder.read(Data, 0, Data.length);
                fftPrint(Data, bufferSizeInBytes);
            }
            return null;
        }

        boolean fftPrint(byte[] waveArray, int bufferSizeInBytes) {
            double HANDSHAKE_START_HZ = 8192;
            double HANDSHAKE_END_HZ = 8192 + 512;
            int len = waveArray.length;
            double[] waveTransformReal = new double[len];
            double[] waveTransformImg = new double[len];

            for (int i = 0; i < len; i++) {
                waveTransformReal[i] = waveArray[i]; // copy of original
                waveTransformImg[i] = waveArray[i];  // FFT transformed below
            }

            RealDoubleFFT p = new RealDoubleFFT(bufferSizeInBytes);
            p.ft(waveTransformImg);

            // calculating abs
            double[] abs = new double[len];
            for (int i = 0; i < len; i++) {
                abs[i] = Math.sqrt(waveTransformReal[i] * waveTransformReal[i] + waveTransformImg[i] * waveTransformImg[i]);
            }

            // calculating maxIndex
            int maxIndex = 0;
            for (int i = 0; i < len; i++) {
                if (abs[i] > abs[maxIndex]) {
                    maxIndex = i;
                }
            }

            double dominantFrequency = (maxIndex * 44100) / len;
            if (dominantFrequency > 0) Log.d("Freq: ", String.format("%f", dominantFrequency));

            if (match(dominantFrequency, HANDSHAKE_START_HZ)) {
                Log.i("Handshake start:", "FOUND START");
            }
            if (match(dominantFrequency, HANDSHAKE_END_HZ)) {
                Log.i("Handshake end:", "FOUND END");
                return true;
            }
            return false;
        }

        boolean match(double freq1, double freq2) {
            return (Math.abs(freq1 - freq2) < 20);
        }
    }



Note: RealDoubleFFT is from ca.uol.aig.fftpack.



I'm not sure if I have done it right. The frequencies are printed in Logcat, but the code never matches HANDSHAKE_START_HZ, even though that tone is present in the audio being played. What am I doing wrong?

java audio signal-processing fft pcm

asked Nov 14 '18 at 16:49 by Arun, edited Nov 14 '18 at 17:23 by Paul R

1 Answer

Note that the frequency resolution of a FFT magnitude peak depends on the length of the FFT (and the window, etc.). This length is not specified or constrained in your code, so you won't know whether it is even possible for any FFT result bin to be within 20 Hz of your target frequencies.
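
To make that point concrete, here is a minimal standalone sketch (not part of the original answer). The class name and the 4096-byte buffer are made up for illustration; it assumes 16-bit mono PCM at 44.1 kHz as in the question, and only shows how the bin spacing, and therefore the best achievable match accuracy, follows from the FFT length in samples.

    // Illustrative only: shows how the FFT bin spacing follows from the FFT length.
    // Assumes 16-bit mono PCM at 44.1 kHz; the buffer size is hypothetical.
    public class BinResolutionDemo {
        public static void main(String[] args) {
            int sampleRateInHz = 44100;
            int bufferSizeInBytes = 4096;           // hypothetical AudioRecord buffer
            int fftSize = bufferSizeInBytes / 2;    // ENCODING_PCM_16BIT: 2 bytes per sample

            double binWidthHz = (double) sampleRateInHz / fftSize; // spacing between FFT bins
            double worstCaseErrorHz = binWidthHz / 2.0;            // max distance of a pure tone from its nearest bin
            double targetHz = 8192.0;                              // HANDSHAKE_START_HZ

            int nearestBin = (int) Math.round(targetHz / binWidthHz);
            System.out.printf("fftSize = %d samples -> bin width = %.2f Hz%n", fftSize, binWidthHz);
            System.out.printf("nearest bin to %.0f Hz: bin %d at %.2f Hz (worst-case error %.2f Hz)%n",
                    targetHz, nearestBin, nearestBin * binWidthHz, worstCaseErrorHz);

            // If the worst-case error exceeds the 20 Hz tolerance used in match(),
            // a pure tone at the target frequency can fail the check even when it
            // is clearly present in the recording.
            System.out.println(worstCaseErrorHz < 20 ? "20 Hz tolerance is reachable"
                                                     : "20 Hz tolerance cannot be guaranteed");
        }
    }

In the posted code the FFT length is also entangled with the byte count: len counts bytes, while ENCODING_PCM_16BIT delivers two bytes per sample, so the number of real samples per buffer is only bufferSizeInBytes / 2.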

answered Nov 16 '18 at 16:27 by hotpaw2

• Thanks for the answer @hotpaw2. I'm completely new to sound, and I managed to write the above code by reading a lot on the net. I did hardcode bufferSizeInBytes to 4096, but I still did not see the expected output. Each time I run this program and play the same sound, I get different dominant frequencies for each chunk. Can you please help by correcting the above code? Many thanks.

  – Arun, Nov 17 '18 at 11:59
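
For anyone following up on the byte-versus-sample point: below is a small, hypothetical helper (not from this thread) sketching one way to reassemble the raw AudioRecord bytes into 16-bit samples before running the FFT, so the transform length is counted in samples rather than bytes. The method name is made up, and little-endian byte order is an assumption (the usual case for ENCODING_PCM_16BIT on Android hardware, but worth verifying on the target device).

    // Hypothetical helper (not from the thread): converts an AudioRecord byte
    // buffer into normalized double samples for the FFT, so the transform
    // length is counted in samples rather than bytes.
    // Assumes ENCODING_PCM_16BIT in little-endian byte order.
    // Requires: import java.nio.ByteBuffer; import java.nio.ByteOrder;
    double[] bytesToSamples(byte[] raw, int validBytes) {
        short[] pcm = new short[validBytes / 2];
        ByteBuffer.wrap(raw, 0, validBytes)
                  .order(ByteOrder.LITTLE_ENDIAN)
                  .asShortBuffer()
                  .get(pcm);

        double[] samples = new double[pcm.length];
        for (int i = 0; i < pcm.length; i++) {
            samples[i] = pcm[i] / 32768.0; // scale 16-bit range to roughly [-1, 1)
        }
        return samples;
    }

With data prepared this way, a conventional N-point transform has N = bufferSizeInBytes / 2 points, and bin k corresponds to roughly k * sampleRateInHz / N Hz.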